# --- File: test/broken_test_log.py (repo: Brimizer/python-ant, license: MIT) ---
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, Martín Raúl Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
import unittest
from ant.core.log import *

LOG_LOCATION = '/tmp/python-ant.logtest.ant'
class LogReaderTest(unittest.TestCase):
def setUp(self):
lw = LogWriter(LOG_LOCATION)
lw.logOpen()
lw.logRead(b'\x01')
lw.logWrite(b'\x00')
lw.logRead(b'TEST')
lw.logClose()
lw.close()
self.log = LogReader(LOG_LOCATION)
def test_open_close(self):
self.assertTrue(self.log.is_open)
self.log.close()
self.assertFalse(self.log.is_open)
self.log.open(LOG_LOCATION)
self.assertTrue(self.log.is_open)
def test_read(self):
t1 = self.log.read()
t2 = self.log.read()
t3 = self.log.read()
t4 = self.log.read()
t5 = self.log.read()
        # Events replay in write order; payloads come back as the bytes
        # that were logged in setUp().
        self.assertEqual(self.log.read(), '')
        self.assertEqual(t1[0], EVENT_OPEN)
        self.assertTrue(isinstance(t1[1], int))
        self.assertEqual(len(t1), 2)
        self.assertEqual(t2[0], EVENT_READ)
        self.assertTrue(isinstance(t2[1], int))
        self.assertEqual(len(t2), 3)
        self.assertEqual(t2[2], b'\x01')
        self.assertEqual(t3[0], EVENT_WRITE)
        self.assertTrue(isinstance(t3[1], int))
        self.assertEqual(len(t3), 3)
        self.assertEqual(t3[2], b'\x00')
        self.assertEqual(t4[0], EVENT_READ)
        self.assertEqual(t4[2], b'TEST')
        self.assertEqual(t5[0], EVENT_CLOSE)
        self.assertTrue(isinstance(t5[1], int))
        self.assertEqual(len(t5), 2)
class LogWriterTest(unittest.TestCase):
def setUp(self):
self.log = LogWriter(LOG_LOCATION)
def test_open_close(self):
self.assertTrue(self.log.is_open)
self.log.close()
self.assertFalse(self.log.is_open)
self.log.open(LOG_LOCATION)
self.assertTrue(self.log.is_open)
def test_log(self):
# Redundant, any error in log* methods will cause the LogReader test
# suite to fail.
pass
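# A minimal round-trip sketch of the log API exercised above, kept as a
# comment so the test module stays import-safe; it mirrors setUp() and
# test_read() rather than introducing any new API:
#
#     lw = LogWriter('/tmp/demo.ant')
#     lw.logOpen()
#     lw.logRead(b'\x01')
#     lw.logClose()
#     lw.close()
#     lr = LogReader('/tmp/demo.ant')
#     assert lr.read()[0] == EVENT_OPEN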
# --- File: python/GafferSceneUI/LightToCameraUI.py (repo: ddesmond/gaffer, license: BSD-3-Clause) ---
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
import GafferSceneUI
def filmFitMetadata():
# Take the metadata from StandardOptionsUI, except not the layout section
allOptions = GafferSceneUI.StandardOptionsUI.plugsMetadata[ "options.filmFit" ] + GafferSceneUI.StandardOptionsUI.plugsMetadata[ "options.filmFit.value" ]
optionPairs = zip( allOptions[::2], allOptions[1::2] )
return sum( [ [i,j] for i,j in optionPairs if i != "layout:section" ], [] )
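# Shape of the pair/filter/flatten transform above, shown with hypothetical
# entries (the real values come from StandardOptionsUI.plugsMetadata):
#   allOptions  = ["a", 1, "layout:section", "S", "b", 2]
#   optionPairs -> [("a", 1), ("layout:section", "S"), ("b", 2)]
#   result      -> ["a", 1, "b", 2]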
Gaffer.Metadata.registerNode(
GafferScene.LightToCamera,
"description",
"""
Converts lights into cameras. Spotlights are converted to a perspective
camera with the field of view matching the cone angle, and distant lights are
converted to an orthographic camera.
""",
plugs = {
"filmFit" : filmFitMetadata(),
"distantAperture" : [
"description",
"""
The orthographic aperture used when converting distant lights
			(which are theoretically infinite in extent).
""",
],
"clippingPlanes" : [
"description",
"""
Clipping planes for the created cameras. When creating a perspective camera, a near clip
<= 0 is invalid, and will be replaced with 0.01. Also, certain lights only start casting
light at some distance - if near clip is less than this, it will be increased.
""",
],
"filter" : [
"description",
"""
Specifies which lights to convert.
""",
],
}
)
# --- File: eccProg.py (repo: ganey/hm-gwmfr, license: MIT) ---
#!/usr/bin/env python3
from time import sleep
import logging
import os
import subprocess
print("Nebra ECC Tool")
preTestFail = 0
afterTestFail = 0
ECC_SUCCESSFUL_TOUCH_FILEPATH = "/var/data/gwmfr_ecc_provisioned"
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
def record_successful_provision():
logging.debug("ECC provisioning complete")
# Via: https://stackoverflow.com/questions/12654772/create-empty-file-using-python/12654798
# because path lib not included in python3-minimal
# https://stackoverflow.com/questions/1158076/implement-touch-using-python
open(ECC_SUCCESSFUL_TOUCH_FILEPATH, 'a').close()
logging.debug("ECC provisioning recorded. Touched to %s" % ECC_SUCCESSFUL_TOUCH_FILEPATH)
while preTestFail < 10:
preTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True)
preTestResult = str(preTest.stdout.decode('ascii')).rstrip()
if "not responding to pings" not in preTestResult:
break
else:
print("Can't load provisioning tool, retrying")
preTestFail += 1
sleep(2)
if "ecc_response_exec_error" in preTestResult:
print("Provisioning")
while afterTestFail < 5:
subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "provision"])
print("Testing")
afterTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True).stdout
afterTestResult = str(afterTest.decode('ascii')).rstrip()
print(afterTestResult)
if "ecc_response_exec_error" in afterTestResult:
print("\033[91mProgramming FAILED\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(afterTestResult) == 51 or len(afterTestResult) == 52):
print("\033[92mProgramming Success!\033[0m")
record_successful_provision()
break
else:
print("\033[91mAn Unknown Error Occured\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(preTestResult) == 50 or len(preTestResult) == 51 or len(preTestResult) == 52):
print("\033[93mKey Already Programmed\033[0m")
print(preTestResult)
record_successful_provision()
else:
print("An Unknown Error Occured")
print(preTestResult)
# This next bit of mank is so we can run the gwmfr container for longer
# by providing the OVERRIDE_GWMFR_EXIT environment variable for trouble
# shooting purposes.
if os.getenv('OVERRIDE_GWMFR_EXIT', None):
while(True):
print("GWMFR Utility Exit Overriden")
sleep(300)
# --- File: doc/tutorial/getargs.py (repo: OliverTED/doit, license: MIT) ---
DOIT_CONFIG = {'default_tasks': ['use_cmd', 'use_python']}
def task_compute():
def comp():
return {'x':5,'y':10, 'z': 20}
return {'actions': [(comp,)]}
def task_use_cmd():
return {'actions': ['echo x=%(x)s, z=%(z)s'],
'getargs': {'x': ('compute', 'x'),
'z': ('compute', 'z')},
'verbosity': 2,
}
def task_use_python():
return {'actions': [show_getargs],
'getargs': {'x': ('compute', 'x'),
'y': ('compute', 'z')},
'verbosity': 2,
}
def show_getargs(x, y):
print "this is x:%s" % x
print "this is y:%s" % y
# --- File: examples/run_unlinkable.py (repo: danesjenovdan/reference_implementation, license: Apache-2.0) ---
#!/usr/bin/env python3
""" Simple example/demo of the unlinkable DP-3T design
This demo simulates some interactions between two phones,
represented by the contact tracing modules, and then runs
contact tracing.
"""
__copyright__ = """
Copyright 2020 EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
from datetime import timedelta
from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch
def report_broadcasted_ephids(name, app):
"""
Convenience function to report some broadcasted EphIDs
"""
reporting_time = app.start_of_today + timedelta(hours=10)
ephid = app.get_ephid_for_time(reporting_time)
print("At {}: {} broadcasts {}".format(reporting_time.time(), name, ephid.hex()))
def report_day(time):
"""
Convenience function to report start of the day
"""
print("---- {} ----".format(time))
def process_single_day(alice, bob, interaction_time=None):
"""
Convenience function, process and report on a single day
"""
report_day(alice.today)
report_broadcasted_ephids("Alice", alice)
report_broadcasted_ephids("Bob", bob)
if interaction_time:
print("Alice and Bob interact:")
ephid_bob = bob.get_ephid_for_time(interaction_time)
alice.add_observation(ephid_bob, interaction_time)
print(" Alice observes Bob's EphID {}".format(ephid_bob.hex()))
ephid_alice = alice.get_ephid_for_time(interaction_time)
bob.add_observation(ephid_alice, interaction_time)
print(" Bob observes Alice's EphID {}".format(ephid_alice.hex()))
else:
print("Alice and Bob do not interact")
# Advance to the next day
alice.next_day()
bob.next_day()
print("")
def main():
alice = ContactTracer()
bob = ContactTracer()
### Interaction ###
process_single_day(alice, bob)
process_single_day(alice, bob)
interaction_time = alice.start_of_today + timedelta(hours=10)
bob_contagious_start = bob.start_of_today
process_single_day(alice, bob, interaction_time)
print("... skipping 3 days ...\n")
for _ in range(4):
alice.next_day()
bob.next_day()
### Diagnosis and reporting ###
report_day(alice.today)
print("Bob is diagnosed with SARS-CoV-2")
print(
"Doctor establishes that Bob started being contagious at {}".format(
bob_contagious_start
)
)
print("And that Bob was contagious for 3 days")
bob_contagious_end = bob_contagious_start + timedelta(days=3)
print("\n[Bob -> Server] Bob sends:")
tracing_info_bob = bob.get_tracing_information(
bob_contagious_start, bob_contagious_end
)
print(
" * his seeds for the time period {} to {}".format(
bob_contagious_start, bob_contagious_end
)
)
print(" * and the corresponding epochs\n")
### Contact tracing ###
print("[Server] Compiles download batch by:")
print(" * Computing hashed observations given the seeds")
print(" * Inserts these into a cuckoo filter\n")
batch = TracingDataBatch([tracing_info_bob])
print("[Server -> Alice] Alice receives batch")
print(" * Alice checks if she was in contact with an infected person")
if alice.matches_with_batch(batch) > 0:
print(" * CORRECT: Alice's phone concludes she is at risk")
else:
print(" * ERROR: Alice's phone does not conclude she is at risk")
if __name__ == "__main__":
main()
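# To try the demo, assuming the package providing dp3t.protocols.unlinkable
# is importable (e.g. after `pip install -e .` from the repository root):
#
#   $ python3 examples/run_unlinkable.py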
# --- File: EasyRecycle/tests/unittests/core/views/test_BecomeCommercialAPIView.py (repo: YuriyLisovskiy/EasyRecycle, license: MIT) ---
from django.urls import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
from rest_framework_simplejwt.state import User
from core.views import BecomeCommercialAPIView
from tests.unittests.common import APIFactoryTestCase
class BecomeCommercialAPITestCase(APIFactoryTestCase):
def setUp(self) -> None:
super(BecomeCommercialAPITestCase, self).setUp()
self.view = BecomeCommercialAPIView.as_view()
self.user = User.objects.get(username='User')
self.user_2 = User.objects.get(username='User2')
self.user_3 = User.objects.get(username='User3')
self.commercial_user = User.objects.get(username='Commercial')
def test_BecomeCommercialValid(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': 'qwerty'
})
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(User.objects.get(username='User').is_commercial)
def test_BecomeCommercialInvalid(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': 'qerty'
})
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_BecomeCommercialUnauthenticated(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': 'qwerty'
})
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_BecomeCommercialNoData(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'))
force_authenticate(request, self.user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_BecomeCommercialAlreadyCommercial(self):
request = self.request_factory.put(reverse('api_v1:core:become_commercial'), {
'password': 'qwerty'
})
force_authenticate(request, self.commercial_user)
response = self.view(request)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# --- File: colour/colorimetry/tests/test_spectrum.py (repo: aurelienpierre/colour, license: BSD-3-Clause) ---
"""Defines the unit tests for the :mod:`colour.colorimetry.spectrum` module."""
import colour
import numpy as np
import unittest
import scipy
from distutils.version import LooseVersion
from colour.algebra import CubicSplineInterpolator
from colour.colorimetry.spectrum import SPECTRAL_SHAPE_DEFAULT
from colour.colorimetry.spectrum import (
SpectralShape,
SpectralDistribution,
MultiSpectralDistributions,
reshape_sd,
reshape_msds,
sds_and_msds_to_sds,
sds_and_msds_to_msds,
)
from colour.hints import Dict, Tuple
from colour.utilities import tstack
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"DATA_SAMPLE",
"DATA_SAMPLE_NON_UNIFORM",
"DATA_SAMPLE_INTERPOLATED",
"DATA_SAMPLE_INTERPOLATED_NON_UNIFORM",
"DATA_SAMPLE_NORMALISED",
"DATA_STANDARD_OBSERVER_2_DEGREE_CIE1931",
"DATA_CMFS",
"DATA_SAMPLE_ABRIDGED",
"DATA_MULTI_SAMPLE_ABRIDGED",
"TestSpectralShape",
"TestSpectralDistribution",
"TestMultiSpectralDistributions",
"TestReshapeSd",
"TestSdsAndMdsToSds",
"TestSdsAndMsdsToMsds",
]
DATA_SAMPLE: Dict = {
340: 0.0000,
360: 0.0000,
380: 0.0000,
400: 0.0641,
420: 0.0645,
440: 0.0562,
460: 0.0537,
480: 0.0559,
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
620: 0.1511,
640: 0.1688,
660: 0.1996,
680: 0.2397,
700: 0.2852,
720: 0.0000,
740: 0.0000,
760: 0.0000,
780: 0.0000,
800: 0.0000,
820: 0.0000,
}
DATA_SAMPLE_NON_UNIFORM: Dict = {
391.898: 16.331740,
392.069: 16.333122,
405.606: 40.197224,
406.794: 39.923366,
406.891: 39.924098,
407.026: 39.925138,
416.286: 40.064293,
418.690: 40.009950,
426.700: 18.045809,
426.726: 18.045986,
432.556: 38.435883,
464.742: 29.534647,
465.025: 29.534647,
465.147: 29.534647,
466.586: 38.226470,
477.175: 7.487795,
493.205: 7.684766,
505.217: 7.684766,
513.294: 20.701285,
513.328: 20.704211,
514.349: 20.704211,
514.516: 20.709788,
515.109: 20.709788,
538.034: 7.684766,
564.807: 20.704211,
566.247: 20.709788,
569.592: 32.103387,
580.133: 37.548490,
581.198: 37.548490,
582.642: 40.197224,
588.977: 18.045986,
589.159: 18.045809,
600.113: 8.643020,
600.603: 8.647157,
600.718: 8.640394,
601.068: 8.640394,
601.322: 8.647157,
601.484: 8.643020,
657.805: 14.448826,
658.288: 14.448826,
658.761: 8.537097,
674.438: 38.22647,
678.390: 20.709788,
703.725: 38.435883,
711.318: 8.647157,
711.519: 8.640394,
711.563: 22.532398,
711.699: 8.647157,
711.990: 22.536906,
723.132: 16.33174,
723.642: 16.333122,
761.265: 41.342187,
786.089: 8.850659,
805.862: 8.850659,
}
DATA_SAMPLE_INTERPOLATED: Tuple = (
0.000000000000000,
0.000230709627131,
0.000384144814593,
0.000507137093115,
0.000632114832536,
0.000778810112328,
0.000955965592105,
0.001163041382140,
0.001391921913876,
0.001628622810444,
0.001854997757177,
0.002050445372122,
0.002193616076555,
0.002264118965498,
0.002244228678230,
0.002120592268802,
0.001885936076555,
0.001540772596628,
0.001095107350478,
0.000570145756392,
0.000000000000000,
-0.000581325882457,
-0.001183945630981,
-0.001820338942229,
-0.002492278660287,
-0.003192248184715,
-0.003904858878589,
-0.004608267476544,
-0.005275593492823,
-0.005876336629317,
-0.006377794183612,
-0.006746478457031,
-0.006949534162679,
-0.006956155833489,
-0.006739005230263,
-0.006275628749720,
-0.005549874832536,
-0.004553311371393,
-0.003286643119019,
-0.001761129096236,
0.000000000000000,
0.001979832128906,
0.004221516875000,
0.006762744980469,
0.009617760000000,
0.012780285644531,
0.016226453125000,
0.019917728496094,
0.023803840000000,
0.027825705410156,
0.031918359375000,
0.036013880761719,
0.040044320000000,
0.043944626425781,
0.047655575625000,
0.051126696777344,
0.054319200000000,
0.057208903691406,
0.059789161875000,
0.062073791542969,
0.064100000000000,
0.065908998066406,
0.067477265625000,
0.068776060136719,
0.069791040000000,
0.070520520019531,
0.070973726875000,
0.071169054589844,
0.071132320000000,
0.070895018222656,
0.070492578125000,
0.069962617792969,
0.069343200000000,
0.068671087675781,
0.067979999375000,
0.067298864746094,
0.066650080000000,
0.066047763378906,
0.065496010625000,
0.064987150449219,
0.064500000000000,
0.064007805449219,
0.063510900625000,
0.063016138378906,
0.062528640000000,
0.062051989746094,
0.061588429375000,
0.061139052675781,
0.060704000000000,
0.060282652792969,
0.059873828125000,
0.059475973222656,
0.059087360000000,
0.058706279589844,
0.058331236875000,
0.057961145019531,
0.057595520000000,
0.057234675136719,
0.056879915625000,
0.056533733066406,
0.056200000000000,
0.055883811757812,
0.055589516250000,
0.055320009023437,
0.055076960000000,
0.054860961914063,
0.054671678750000,
0.054507994179687,
0.054368160000000,
0.054249944570312,
0.054150781250000,
0.054067916835938,
0.053998560000000,
0.053940029726562,
0.053889903750000,
0.053846166992187,
0.053807360000000,
0.053772727382813,
0.053742366250000,
0.053717374648438,
0.053700000000000,
0.053692483144531,
0.053693175625000,
0.053700270058594,
0.053712960000000,
0.053731311035156,
0.053756131875000,
0.053788845449219,
0.053831360000000,
0.053885940175781,
0.053955078125000,
0.054041364589844,
0.054147360000000,
0.054275465566406,
0.054427794375000,
0.054606042480469,
0.054811360000000,
0.055044222207031,
0.055304300625000,
0.055590334121094,
0.055900000000000,
0.056231407851562,
0.056587597500000,
0.056971279335937,
0.057383360000000,
0.057823168945313,
0.058288685000000,
0.058776762929688,
0.059283360000000,
0.059803762539063,
0.060332812500000,
0.060865134023438,
0.061395360000000,
0.061918358632813,
0.062429460000000,
0.062924682617188,
0.063400960000000,
0.063856367226563,
0.064290347500000,
0.064703938710938,
0.065100000000000,
0.065481647265625,
0.065846786250000,
0.066192982265625,
0.066519040000000,
0.066824853515625,
0.067111256250000,
0.067379871015625,
0.067632960000000,
0.067873274765625,
0.068103906250000,
0.068328134765625,
0.068549280000000,
0.068770551015625,
0.068994896250000,
0.069224853515625,
0.069462400000000,
0.069708802265625,
0.069964466250000,
0.070228787265625,
0.070500000000000,
0.070776270703125,
0.071059446250000,
0.071351597578125,
0.071653920000000,
0.071966845703125,
0.072290156250000,
0.072623095078125,
0.072964480000000,
0.073312815703125,
0.073666406250000,
0.074023467578125,
0.074382240000000,
0.074741100703125,
0.075098676250000,
0.075453955078125,
0.075806400000000,
0.076156060703125,
0.076503686250000,
0.076850837578125,
0.077200000000000,
0.077552701992188,
0.077904262500000,
0.078250762070313,
0.078590880000000,
0.078925561523438,
0.079257685000000,
0.079591729101562,
0.079933440000000,
0.080289498554688,
0.080667187500000,
0.081074058632813,
0.081517600000000,
0.082004903085938,
0.082542330000000,
0.083135180664063,
0.083787360000000,
0.084501045117188,
0.085276352500000,
0.086111005195313,
0.087000000000000,
0.087938453242188,
0.088930890000000,
0.089981833007812,
0.091092960000000,
0.092263452148438,
0.093490342500000,
0.094768864414062,
0.096092800000000,
0.097454828554687,
0.098846875000000,
0.100260458320312,
0.101687040000000,
0.103118372460937,
0.104546847500000,
0.105965844726563,
0.107370080000000,
0.108755953867188,
0.110121900000000,
0.111468733632812,
0.112800000000000,
0.114120657988281,
0.115431176875000,
0.116730532871094,
0.118017600000000,
0.119291174316406,
0.120549998125000,
0.121792784199219,
0.123018240000000,
0.124225091894531,
0.125412109375000,
0.126578129277344,
0.127722080000000,
0.128843005722656,
0.129940090625000,
0.131012683105469,
0.132060320000000,
0.133082750800781,
0.134079961875000,
0.135052200683594,
0.136000000000000,
0.136923531484375,
0.137820920000000,
0.138690739765625,
0.139532640000000,
0.140347216796875,
0.141135885000000,
0.141900750078125,
0.142644480000000,
0.143370177109375,
0.144081250000000,
0.144781285390625,
0.145473920000000,
0.146162712421875,
0.146851015000000,
0.147541845703125,
0.148237760000000,
0.148940722734375,
0.149651980000000,
0.150371931015625,
0.151100000000000,
0.151834687363281,
0.152574745625000,
0.153319862089844,
0.154070560000000,
0.154828088378906,
0.155594311875000,
0.156371600605469,
0.157162720000000,
0.157970720644531,
0.158798828125000,
0.159650332871094,
0.160528480000000,
0.161436359160156,
0.162376794375000,
0.163352233886719,
0.164364640000000,
0.165415378925781,
0.166505110625000,
0.167633678652344,
0.168800000000000,
0.170002988242187,
0.171244585000000,
0.172526722382812,
0.173850400000000,
0.175215795898437,
0.176622377500000,
0.178069012539063,
0.179554080000000,
0.181075581054688,
0.182631250000000,
0.184218665195313,
0.185835360000000,
0.187478933710938,
0.189147162500000,
0.190838110351563,
0.192550240000000,
0.194282523867188,
0.196034555000000,
0.197806658007813,
0.199600000000000,
0.201405046894531,
0.203174116875000,
0.204868198964844,
0.206468000000000,
0.207971350097656,
0.209390608125000,
0.210750067167969,
0.212083360000000,
0.213430864550781,
0.214837109375000,
0.216348179121094,
0.218009120000000,
0.219861345253906,
0.221940040625000,
0.224271569824219,
0.226870880000000,
0.229738907207031,
0.232859981875000,
0.236199234277344,
0.239700000000000,
0.243337282929688,
0.247262056250000,
0.251598942851562,
0.256394240000000,
0.261625952148438,
0.267213823750000,
0.273029372070313,
0.278905920000000,
0.284648628867188,
0.290044531250000,
0.294872563789062,
0.298913600000000,
0.301960483085938,
0.303828058750000,
0.304363208007813,
0.303454880000000,
0.301044124804688,
0.297134126250000,
0.291800234726562,
0.285200000000000,
0.277470396855469,
0.268408756875000,
0.257826504003906,
0.245651040000000,
0.231911267089844,
0.216723110625000,
0.200275041738281,
0.182813600000000,
0.164628916074219,
0.146040234375000,
0.127381435722656,
0.108986560000000,
0.091175328808594,
0.074238668125000,
0.058424230957031,
0.043921920000000,
0.030849410292969,
0.019237671875000,
0.009016492441406,
0.000000000000000,
-0.008014721386719,
-0.014901410625000,
-0.020510217441406,
-0.024767360000000,
-0.027665856933594,
-0.029256259375000,
-0.029637382988281,
-0.028947040000000,
-0.027352771230469,
-0.025042578125000,
-0.022215654785156,
-0.019073120000000,
-0.015808749277344,
-0.012599706875000,
-0.009597277832031,
-0.006917600000000,
-0.004632396074219,
-0.002759705625000,
-0.001254617128906,
0.000000000000000,
0.001146203203125,
0.002165737500000,
0.003010363984375,
0.003650560000000,
0.004073291015625,
0.004279782500000,
0.004283291796875,
0.004106880000000,
0.003781183828125,
0.003342187500000,
0.002828994609375,
0.002281600000000,
0.001738661640625,
0.001235272500000,
0.000800732421875,
0.000456320000000,
0.000213064453125,
0.000069517500000,
0.000009525234375,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
)
DATA_SAMPLE_INTERPOLATED_NON_UNIFORM: Tuple = (
16.329808636577400,
16.722487609243078,
17.780769796558388,
19.388440919210822,
21.429286697887836,
23.787092853276910,
26.345645106065515,
28.988729176941121,
31.600130786591194,
34.063635655703216,
36.263029504964656,
38.082098055062993,
39.404627026685688,
40.114402140520198,
40.111247588026082,
39.925103635141149,
39.890167058876870,
39.799323872976096,
39.680556569501256,
39.561845869769570,
39.471172495098209,
39.436517166804364,
39.485860606205222,
39.647183534617994,
39.948466673359860,
40.374181475729280,
40.480943048717741,
39.587763146544347,
37.399010971904012,
34.259517350145224,
30.574008069142096,
26.747208916768866,
23.183845680899680,
20.288644149408725,
18.466330110170176,
18.131455040802454,
19.618408431271092,
22.601624593221480,
26.621530082560952,
31.218551455196845,
35.933115267036499,
40.312773209586069,
44.121677838324345,
47.374686358105983,
50.100647613008405,
52.328410447109043,
54.086823704485290,
55.404736229214613,
56.310996865374428,
56.834454457042114,
57.003957848295137,
56.848355883210893,
56.396497405866839,
55.677231260340363,
54.719406290708918,
53.551871341049903,
52.203475255440743,
50.703066877958868,
49.079495052681693,
47.361608623686649,
45.578256435051173,
43.758287330852653,
41.930550155168532,
40.123893752076235,
38.367166965653183,
36.689218639976801,
35.118897619124496,
33.685052747173714,
32.416532868201863,
31.342186826286373,
30.490863465504646,
29.891411629934137,
29.572680163652254,
29.538621481156376,
33.594913456316092,
41.048330705400552,
45.551744552213606,
47.142069193311166,
46.264235269850914,
43.363173422990592,
38.883814293887831,
33.271088523700378,
26.969926753585888,
20.425259624702068,
14.082017778206605,
8.385131855257200,
3.728429425651282,
0.156961054721180,
-2.429909888236355,
-4.133307786198728,
-5.054357022143353,
-5.294181979047651,
-4.953907039889023,
-4.134656587644884,
-2.937555005292646,
-1.463726675809728,
0.185704017826468,
1.909612692638526,
3.606874965649032,
5.176366453880572,
6.516962774355742,
7.527539544097118,
8.122379803803623,
8.331483144611147,
8.237444938631239,
7.923124732066758,
7.471382071120567,
6.965076501995521,
6.487067570894484,
6.120214824020312,
5.947377807575871,
6.051416067764015,
6.515189150787608,
7.421556602849507,
8.833103319101975,
10.654113909347958,
12.714472051171430,
14.843629856075644,
16.871039435563851,
18.626152901139314,
19.938422364305275,
20.637299936564997,
20.706835013628812,
20.713132276905259,
20.620587314150111,
20.396819412610807,
20.054405097058748,
19.605938071902443,
19.064012041550384,
18.441220710411045,
17.750157782892924,
17.003416963404511,
16.213591956354293,
15.393276466150766,
14.555064197202412,
13.711548853917725,
12.875324140705192,
12.058983761973309,
11.275121422130562,
10.536330825585436,
9.855205676746426,
9.244339680022023,
8.716326539820709,
8.283759960550988,
7.959233646621334,
7.755341302440248,
7.684676632416213,
7.756084346822568,
7.961801724157814,
8.289468950531905,
8.726726048591537,
9.261213040983417,
9.880569950354237,
10.572436799350699,
11.324453610619505,
12.124260406807357,
12.959497210560945,
13.817804044526978,
14.686820931352152,
15.554187893683173,
16.407544954166731,
17.234532135449530,
18.022789460178267,
18.759956950999651,
19.433674630560375,
20.031582521507140,
20.541320646486646,
20.950529028145592,
21.246847689130675,
21.417916652088600,
21.451375939666065,
21.334865574509770,
21.056025579266414,
20.604750276779001,
20.494345115463702,
22.221705466331883,
25.661450384670388,
29.775598875658655,
33.531925166546948,
36.383260993760757,
38.399428506854193,
39.690190939354423,
40.365311524788659,
40.534553496684069,
40.307680088567857,
39.794454533967205,
39.104640066409281,
38.347999919421312,
37.634297326530451,
37.374685121273394,
39.119690201154867,
40.253206513156250,
38.299118522290833,
34.130306343480264,
28.883500017392159,
23.695429584694160,
19.702825086053920,
18.042370406596557,
17.942146149526579,
17.437952042997388,
16.603615436175311,
15.527509997088327,
14.298009393764401,
13.003487294231499,
11.732317366517600,
10.572873278650665,
9.613528698658669,
8.942657294569587,
8.648632734411382,
8.638275005914387,
8.608461770725761,
8.468358277399256,
8.236485894689265,
7.918459394063972,
7.519893546991550,
7.046403124940173,
6.503602899378026,
5.897107641773280,
5.232532123594114,
4.515491116308703,
3.751599391385226,
2.946471720291860,
2.105722874496783,
1.234967625468170,
0.339820744674198,
-0.574102996416954,
-1.501188826337108,
-2.435821973618090,
-3.372387666791723,
-4.305271134389827,
-5.228857604944231,
-6.137532306986749,
-7.025680469049206,
-7.887687319663434,
-8.717938087361247,
-9.510818000674471,
-10.260712288134929,
-10.962006178274445,
-11.609084899624833,
-12.196333680717927,
-12.718137750085543,
-13.168882336259514,
-13.542952667771653,
-13.834733973153785,
-14.038611480937735,
-14.148970419655322,
-14.160196017838375,
-14.066673504018716,
-13.862788106728164,
-13.542925054498539,
-13.101469575861673,
-12.532806899349385,
-11.831322253493493,
-10.991400866825831,
-10.007427967878208,
-8.873788785182459,
-7.584868547270405,
-6.135052482673858,
-4.518725819924658,
-2.730273787554616,
-0.764081614095559,
1.385465471920696,
3.723982241962316,
6.257083467497486,
8.990383919994379,
11.929498370921175,
14.976162260830137,
5.084237255461949,
-6.729230790271000,
-14.622433089729542,
-19.038838171389944,
-20.421914563728482,
-19.215130795221405,
-15.861955394344996,
-10.805856889575507,
-4.490303809389212,
2.641235317737622,
10.145291963328749,
17.578397598907866,
24.497083695998736,
30.457881726125084,
35.017323160810655,
37.731939471579153,
38.169944675202451,
36.103648065327157,
31.478944095141092,
24.247257545409393,
14.377470469911161,
2.089605892978410,
-12.208009328653077,
-28.102486071018522,
-45.180935210153116,
-63.030467622092019,
-81.238194182870473,
-99.391225768523640,
-117.076673255086732,
-133.881647518594917,
-149.393259435083451,
-163.198619880587444,
-174.884839731142108,
-184.039029862782684,
-190.248301151544382,
-193.099764473462415,
-192.180530704571822,
-187.077710720907902,
-177.378415398505837,
-162.669755613400923,
-142.538842241628146,
-116.572786159222886,
-84.358698242220214,
-45.483689366655454,
0.465129591436323,
53.867241377821607,
111.906385750398641,
165.969680931356891,
206.832112792864763,
225.268667207090687,
212.054330046203120,
157.964087182370434,
53.772924487761195,
24.128558965593012,
155.948189345952358,
238.667542187567193,
279.823268952005947,
286.952021100836475,
267.590450095627091,
229.275207397946076,
179.542944469361430,
125.930312771441450,
75.973963765754249,
37.210548913868003,
17.176719677350921,
17.152394375030820,
19.379794324130732,
21.517434431022508,
23.565545988087592,
25.524360287707406,
27.394108622263396,
29.175022284136993,
30.867332565709628,
32.471270759362753,
33.987068157477793,
35.414956052436175,
36.755165736619354,
38.007928502408760,
39.173475642185821,
40.252038448331973,
41.243848213228674,
42.149136229257330,
42.968133788799399,
43.701072184236310,
44.348182707949491,
44.909696652320392,
45.385845309730442,
45.776859972561077,
46.082971933193733,
46.304412484009845,
46.441412917390849,
46.494204525718175,
46.463018601373285,
46.348086436737596,
46.149639324192542,
45.867908556119552,
45.503125424900077,
45.055521222915552,
44.525327242547419,
43.912774776177095,
43.218095116186007,
42.441519554955640,
41.583279384867382,
40.643991169034841,
39.627797988936805,
38.540734852902020,
37.388854826093969,
36.178210973676094,
34.914856360811882,
33.604844052664760,
32.254227114398248,
30.869058611175774,
29.455391608160820,
28.019279170516839,
26.566774363407301,
25.103930251995678,
23.636799901445418,
22.171436376920010,
20.713892743582907,
19.270222066597562,
17.846477411127466,
16.448711842336071,
15.082978425386836,
13.755330225443233,
12.471820307668732,
11.238501737226787,
10.061427579280879,
8.946650898994456,
7.900224761530997,
6.928202232053956,
6.036636375726809,
5.231580257713018,
4.519086943176044,
3.905209497279358,
3.396000985186418,
2.997514472060695,
2.715803023065654,
2.556919703364760,
2.526917578121476,
2.631849712499274,
2.877769171661610,
3.270729020771954,
3.816782324993773,
4.521982149490528,
5.392381559425689,
6.434033619962720,
7.652991396265083,
)
DATA_SAMPLE_NORMALISED: Tuple = (
0.000000000000000,
0.000000000000000,
0.000000000000000,
22.475455820476860,
22.615708274894811,
19.705469845722302,
18.828892005610097,
19.600280504908834,
22.826086956521742,
24.719495091164092,
27.068723702664798,
30.504908835904626,
39.551192145862551,
47.685834502103788,
52.980364656381497,
59.186535764375883,
69.985974754558200,
84.046283309957929,
100.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
)
DATA_STANDARD_OBSERVER_2_DEGREE_CIE1931: Dict = {
380: (0.001368, 0.000039, 0.006450),
385: (0.002236, 0.000064, 0.010550),
390: (0.004243, 0.000120, 0.020050),
395: (0.007650, 0.000217, 0.036210),
400: (0.014310, 0.000396, 0.067850),
405: (0.023190, 0.000640, 0.110200),
410: (0.043510, 0.001210, 0.207400),
415: (0.077630, 0.002180, 0.371300),
420: (0.134380, 0.004000, 0.645600),
425: (0.214770, 0.007300, 1.039050),
430: (0.283900, 0.011600, 1.385600),
435: (0.328500, 0.016840, 1.622960),
440: (0.348280, 0.023000, 1.747060),
445: (0.348060, 0.029800, 1.782600),
450: (0.336200, 0.038000, 1.772110),
455: (0.318700, 0.048000, 1.744100),
460: (0.290800, 0.060000, 1.669200),
465: (0.251100, 0.073900, 1.528100),
470: (0.195360, 0.090980, 1.287640),
475: (0.142100, 0.112600, 1.041900),
480: (0.095640, 0.139020, 0.812950),
485: (0.057950, 0.169300, 0.616200),
490: (0.032010, 0.208020, 0.465180),
495: (0.014700, 0.258600, 0.353300),
500: (0.004900, 0.323000, 0.272000),
505: (0.002400, 0.407300, 0.212300),
510: (0.009300, 0.503000, 0.158200),
515: (0.029100, 0.608200, 0.111700),
520: (0.063270, 0.710000, 0.078250),
525: (0.109600, 0.793200, 0.057250),
530: (0.165500, 0.862000, 0.042160),
535: (0.225750, 0.914850, 0.029840),
540: (0.290400, 0.954000, 0.020300),
545: (0.359700, 0.980300, 0.013400),
550: (0.433450, 0.994950, 0.008750),
555: (0.512050, 1.000000, 0.005750),
560: (0.594500, 0.995000, 0.003900),
565: (0.678400, 0.978600, 0.002750),
570: (0.762100, 0.952000, 0.002100),
575: (0.842500, 0.915400, 0.001800),
580: (0.916300, 0.870000, 0.001650),
585: (0.978600, 0.816300, 0.001400),
590: (1.026300, 0.757000, 0.001100),
595: (1.056700, 0.694900, 0.001000),
600: (1.062200, 0.631000, 0.000800),
605: (1.045600, 0.566800, 0.000600),
610: (1.002600, 0.503000, 0.000340),
615: (0.938400, 0.441200, 0.000240),
620: (0.854450, 0.381000, 0.000190),
625: (0.751400, 0.321000, 0.000100),
630: (0.642400, 0.265000, 0.000050),
635: (0.541900, 0.217000, 0.000030),
640: (0.447900, 0.175000, 0.000020),
645: (0.360800, 0.138200, 0.000010),
650: (0.283500, 0.107000, 0.000000),
655: (0.218700, 0.081600, 0.000000),
660: (0.164900, 0.061000, 0.000000),
665: (0.121200, 0.044580, 0.000000),
670: (0.087400, 0.032000, 0.000000),
675: (0.063600, 0.023200, 0.000000),
680: (0.046770, 0.017000, 0.000000),
685: (0.032900, 0.011920, 0.000000),
690: (0.022700, 0.008210, 0.000000),
695: (0.015840, 0.005723, 0.000000),
700: (0.011359, 0.004102, 0.000000),
705: (0.008111, 0.002929, 0.000000),
710: (0.005790, 0.002091, 0.000000),
715: (0.004109, 0.001484, 0.000000),
720: (0.002899, 0.001047, 0.000000),
725: (0.002049, 0.000740, 0.000000),
730: (0.001440, 0.000520, 0.000000),
735: (0.001000, 0.000361, 0.000000),
740: (0.000690, 0.000249, 0.000000),
745: (0.000476, 0.000172, 0.000000),
750: (0.000332, 0.000120, 0.000000),
755: (0.000235, 0.000085, 0.000000),
760: (0.000166, 0.000060, 0.000000),
765: (0.000117, 0.000042, 0.000000),
770: (0.000083, 0.000030, 0.000000),
775: (0.000059, 0.000021, 0.000000),
780: (0.000042, 0.000015, 0.000000),
}
DATA_CMFS: Dict = {
380: np.array([0.001368, 3.90e-05, 0.006450]),
385: np.array([0.002236, 6.40e-05, 0.010550]),
390: np.array([0.004243, 0.000120, 0.020050]),
395: np.array([0.007650, 0.000217, 0.036210]),
400: np.array([0.014310, 0.000396, 0.067850]),
405: np.array([0.023190, 0.000640, 0.110200]),
410: np.array([0.043510, 0.001210, 0.207400]),
415: np.array([0.077630, 0.002180, 0.371300]),
420: np.array([0.134380, 0.004000, 0.645600]),
425: np.array([0.214770, 0.007300, 1.039050]),
430: np.array([0.283900, 0.011600, 1.385600]),
435: np.array([0.328500, 0.016840, 1.622960]),
440: np.array([0.348280, 0.023000, 1.747060]),
445: np.array([0.348060, 0.029800, 1.782600]),
450: np.array([0.336200, 0.038000, 1.772110]),
455: np.array([0.318700, 0.048000, 1.744100]),
460: np.array([0.290800, 0.060000, 1.669200]),
465: np.array([0.251100, 0.073900, 1.528100]),
470: np.array([0.195360, 0.090980, 1.287640]),
475: np.array([0.142100, 0.112600, 1.041900]),
480: np.array([0.095640, 0.139020, 0.812950]),
485: np.array([0.057950, 0.169300, 0.616200]),
490: np.array([0.032010, 0.208020, 0.465180]),
495: np.array([0.014700, 0.258600, 0.353300]),
500: np.array([0.004900, 0.323000, 0.272000]),
505: np.array([0.002400, 0.407300, 0.212300]),
510: np.array([0.009300, 0.503000, 0.158200]),
515: np.array([0.029100, 0.608200, 0.111700]),
520: np.array([0.063270, 0.710000, 0.078250]),
525: np.array([0.109600, 0.793200, 0.057250]),
530: np.array([0.165500, 0.862000, 0.042160]),
535: np.array([0.225750, 0.914850, 0.029840]),
540: np.array([0.290400, 0.954000, 0.020300]),
545: np.array([0.359700, 0.980300, 0.013400]),
550: np.array([0.433450, 0.994950, 0.008750]),
555: np.array([0.512050, 1.000000, 0.005750]),
560: np.array([0.594500, 0.995000, 0.003900]),
565: np.array([0.678400, 0.978600, 0.002750]),
570: np.array([0.762100, 0.952000, 0.002100]),
575: np.array([0.842500, 0.915400, 0.001800]),
580: np.array([0.916300, 0.870000, 0.001650]),
585: np.array([0.978600, 0.816300, 0.001400]),
590: np.array([1.026300, 0.757000, 0.001100]),
595: np.array([1.056700, 0.694900, 0.001000]),
600: np.array([1.062200, 0.631000, 0.000800]),
605: np.array([1.045600, 0.566800, 0.000600]),
610: np.array([1.002600, 0.503000, 0.000340]),
615: np.array([0.938400, 0.441200, 0.000240]),
620: np.array([0.854450, 0.381000, 0.000190]),
625: np.array([0.751400, 0.321000, 0.000100]),
630: np.array([0.642400, 0.265000, 5.00e-05]),
635: np.array([0.541900, 0.217000, 3.00e-05]),
640: np.array([0.447900, 0.175000, 2.00e-05]),
645: np.array([0.360800, 0.138200, 1.00e-05]),
650: np.array([0.283500, 0.107000, 0.000000]),
655: np.array([0.218700, 0.081600, 0.000000]),
660: np.array([0.164900, 0.061000, 0.000000]),
665: np.array([0.121200, 0.044580, 0.000000]),
670: np.array([0.087400, 0.032000, 0.000000]),
675: np.array([0.063600, 0.023200, 0.000000]),
680: np.array([0.046770, 0.017000, 0.000000]),
685: np.array([0.032900, 0.011920, 0.000000]),
690: np.array([0.022700, 0.008210, 0.000000]),
695: np.array([0.015840, 0.005723, 0.000000]),
700: np.array([0.011359, 0.004102, 0.000000]),
705: np.array([0.008111, 0.002929, 0.000000]),
710: np.array([0.005790, 0.002091, 0.000000]),
715: np.array([0.004109, 0.001484, 0.000000]),
720: np.array([0.002899, 0.001047, 0.000000]),
725: np.array([0.002049, 0.000740, 0.000000]),
730: np.array([0.001440, 0.000520, 0.000000]),
735: np.array([0.001000, 0.000361, 0.000000]),
740: np.array([0.000690, 0.000249, 0.000000]),
745: np.array([0.000476, 0.000172, 0.000000]),
750: np.array([0.000332, 0.000120, 0.000000]),
755: np.array([0.000235, 8.50e-05, 0.000000]),
760: np.array([0.000166, 6.00e-05, 0.000000]),
765: np.array([0.000117, 4.20e-05, 0.000000]),
770: np.array([8.30e-05, 3.00e-05, 0.000000]),
775: np.array([5.90e-05, 2.10e-05, 0.000000]),
780: np.array([4.20e-05, 1.50e-05, 0.000000]),
}
DATA_SAMPLE_ABRIDGED: Dict = {
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
}
DATA_MULTI_SAMPLE_ABRIDGED: Dict = {
500: (0.004900, 0.323000, 0.272000),
510: (0.009300, 0.503000, 0.158200),
520: (0.063270, 0.710000, 0.078250),
530: (0.165500, 0.862000, 0.042160),
540: (0.290400, 0.954000, 0.020300),
550: (0.433450, 0.994950, 0.008750),
560: (0.594500, 0.995000, 0.003900),
}
class TestSpectralShape(unittest.TestCase):
"""
Define :class:`colour.colorimetry.spectrum.SpectralShape` class unit tests
methods.
"""
def test_required_attributes(self):
"""Test the presence of required attributes."""
required_attributes = ("start", "end", "interval", "boundaries")
for attribute in required_attributes:
self.assertIn(attribute, dir(SpectralShape))
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = (
"__init__",
"__str__",
"__repr__",
"__hash__",
"__iter__",
"__contains__",
"__len__",
"__eq__",
"__ne__",
"range",
)
for method in required_methods:
self.assertIn(method, dir(SpectralShape))
def test_start(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralShape.start`
attribute.
"""
self.assertEqual(SpectralShape(360, 830, 1).start, 360)
self.assertRaises(AssertionError, lambda: SpectralShape(360, 360, 1))
self.assertRaises(AssertionError, lambda: SpectralShape(360, 0, 1))
def test_end(self):
"""Test :attr:`colour.colorimetry.spectrum.SpectralShape.end` property."""
self.assertEqual(SpectralShape(360, 830, 1).end, 830)
self.assertRaises(AssertionError, lambda: SpectralShape(830, 830, 1))
self.assertRaises(AssertionError, lambda: SpectralShape(830, 0, 1))
def test_interval(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralShape.interval`
property.
"""
self.assertEqual(SpectralShape(360, 830, 1).interval, 1)
def test_boundaries(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralShape.boundaries`
property.
"""
shape = SpectralShape(400, 700, 1)
shape.boundaries = (360, 830)
self.assertEqual(shape.start, 360)
self.assertEqual(shape.end, 830)
def test__hash__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__hash__`
method.
"""
self.assertIsInstance(hash(SpectralShape(0, 10, 0.1)), int)
def test__iter__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__iter__`
method.
"""
np.testing.assert_almost_equal(
[wavelength for wavelength in SpectralShape(0, 10, 0.1)],
np.arange(0, 10 + 0.1, 0.1),
)
def test__contains__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__contains__`
method.
"""
self.assertIn(360.1, SpectralShape(360, 830, 0.1))
self.assertNotIn(360.11, SpectralShape(360, 830, 0.1))
self.assertIn(np.array([0.5, 0.6]), SpectralShape(0, 10, 0.1))
self.assertNotIn(np.array([0.5, 0.61]), SpectralShape(0, 10, 0.1))
def test__len__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__len__`
method.
"""
self.assertEqual(len(SpectralShape(0, 10, 0.1)), 101)
def test__eq__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__eq__`
method.
"""
self.assertEqual(SpectralShape(0, 10, 0.1), SpectralShape(0, 10, 0.1))
self.assertNotEqual(SpectralShape(0, 10, 0.1), None)
def test__ne__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralShape.__ne__`
method.
"""
self.assertNotEqual(
SpectralShape(0, 10, 0.1), SpectralShape(1, 10, 0.1)
)
def test_range(self):
"""Test :func:`colour.colorimetry.spectrum.SpectralShape.range` method."""
np.testing.assert_almost_equal(
[wavelength for wavelength in SpectralShape(0, 10, 0.1)],
np.arange(0, 10 + 0.1, 0.1),
)
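# Standalone sketch of the SpectralShape behaviour asserted above; the
# values simply mirror the assertions, no new API is introduced:
#
#     shape = SpectralShape(360, 830, 1)
#     assert shape.start == 360 and shape.end == 830
#     assert 360.1 in SpectralShape(360, 830, 0.1)
#     assert len(SpectralShape(0, 10, 0.1)) == 101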
class TestSpectralDistribution(unittest.TestCase):
"""
Define :class:`colour.colorimetry.spectrum.SpectralDistribution`
class unit tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
self._sd = SpectralDistribution(DATA_SAMPLE, name="Sample")
self._non_uniform_sd = SpectralDistribution(
DATA_SAMPLE_NON_UNIFORM,
name="Non Uniform Sample",
strict_name="Strict Non Uniform Sample",
)
self._phi = (1 + np.sqrt(5)) / 2
def test_required_attributes(self):
"""Test the presence of required attributes."""
required_attributes = ("strict_name", "wavelengths", "values", "shape")
for attribute in required_attributes:
self.assertIn(attribute, dir(SpectralDistribution))
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = (
"__init__",
"interpolate",
"extrapolate",
"align",
"trim",
"normalise",
)
for method in required_methods:
self.assertIn(method, dir(SpectralDistribution))
def test_strict_name(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralDistribution.strict_name`
property.
"""
self.assertEqual(self._sd.strict_name, "Sample")
self.assertEqual(
self._non_uniform_sd.strict_name, "Strict Non Uniform Sample"
)
def test_wavelengths(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralDistribution.wavelengths`
property.
"""
np.testing.assert_array_equal(self._sd.wavelengths, self._sd.domain)
sd = self._sd.copy()
sd.wavelengths = sd.wavelengths + 10
np.testing.assert_array_equal(sd.wavelengths, sd.domain)
def test_values(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralDistribution.values`
property.
"""
np.testing.assert_array_equal(self._sd.values, self._sd.range)
sd = self._sd.copy()
sd.values = sd.values + 10
np.testing.assert_array_equal(sd.values, sd.range)
def test_shape(self):
"""
Test :attr:`colour.colorimetry.spectrum.SpectralDistribution.shape`
property.
"""
self.assertEqual(self._sd.shape, SpectralShape(340, 820, 20))
def test__init__(self):
"""
Test :meth:`colour.colorimetry.spectrum.SpectralDistribution.__init__`
method.
"""
np.testing.assert_almost_equal(
SpectralDistribution(DATA_SAMPLE).wavelengths,
SpectralDistribution(
DATA_SAMPLE.values(),
SpectralShape(340, 820, 20),
).wavelengths,
)
def test_interpolate(self):
"""
Test :func:`colour.colorimetry.spectrum.\
SpectralDistribution.interpolate` method.
"""
np.testing.assert_almost_equal(
reshape_sd(
self._sd,
SpectralShape(self._sd.shape.start, self._sd.shape.end, 1),
"Interpolate",
).values,
DATA_SAMPLE_INTERPOLATED,
decimal=7,
)
# TODO: Remove statement whenever we make "Scipy" 0.19.0 the minimum
# version.
# Skipping tests because of "Scipy" 0.19.0 interpolation code changes.
if LooseVersion(scipy.__version__) < LooseVersion("0.19.0"):
return # pragma: no cover
np.testing.assert_allclose(
reshape_sd(
self._non_uniform_sd,
SpectralShape(
self._non_uniform_sd.shape.start,
self._non_uniform_sd.shape.end,
1,
),
"Interpolate",
).values,
DATA_SAMPLE_INTERPOLATED_NON_UNIFORM,
rtol=0.0000001,
atol=0.0000001,
)
def test_extrapolate(self):
"""
Test :func:`colour.colorimetry.spectrum.\
SpectralDistribution.extrapolate` method.
"""
data = dict(zip(range(25, 35), [0] * 5 + [1] * 5))
sd = SpectralDistribution(data)
sd.extrapolate(SpectralShape(10, 50, 5))
self.assertAlmostEqual(sd[10], 0, places=7)
self.assertAlmostEqual(sd[50], 1, places=7)
sd = SpectralDistribution(
np.linspace(0, 1, 10), np.linspace(25, 35, 10)
)
sd.extrapolate(
SpectralShape(10, 50, 10),
extrapolator_kwargs={
"method": "Linear",
"left": None,
"right": None,
},
)
self.assertAlmostEqual(sd[10], -1.5000000000000004, places=7)
self.assertAlmostEqual(sd[50], 2.4999999999999964, places=7)
def test_align(self):
"""
Test :func:`colour.colorimetry.spectrum.\
SpectralDistribution.align` method.
"""
shape = SpectralShape(100, 900, 5)
self.assertEqual(self._sd.copy().align(shape).shape, shape)
shape = SpectralShape(600, 650, 1)
self.assertEqual(self._sd.copy().align(shape).shape, shape)
def test_trim(self):
"""
Test :func:`colour.colorimetry.spectrum.\
SpectralDistribution.trim` method.
"""
shape = SpectralShape(400, 700, 20)
self.assertEqual(self._sd.copy().trim(shape).shape, shape)
shape = SpectralShape(200, 900, 1)
self.assertEqual(self._sd.copy().trim(shape).shape, self._sd.shape)
def test_normalise(self):
"""
Test :func:`colour.colorimetry.spectrum.\
SpectralDistribution.normalise` method.
"""
np.testing.assert_almost_equal(
self._sd.copy().normalise(100).values, DATA_SAMPLE_NORMALISED
)
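# The extrapolation behaviour exercised in test_extrapolate above, as a
# standalone sketch (by default the extrapolated values hold the boundary
# values constant; the numbers are those asserted in the test):
#
#     sd = SpectralDistribution(dict(zip(range(25, 35), [0] * 5 + [1] * 5)))
#     sd.extrapolate(SpectralShape(10, 50, 5))
#     assert sd[10] == 0 and sd[50] == 1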
class TestMultiSpectralDistributions(unittest.TestCase):
"""
Define :class:`colour.colorimetry.spectrum.MultiSpectralDistributions`
class unit tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
self._labels = ("x_bar", "y_bar", "z_bar")
self._strict_labels = (
"Strict x_bar",
"Strict y_bar",
"Strict z_bar",
)
self._msds = MultiSpectralDistributions(
DATA_STANDARD_OBSERVER_2_DEGREE_CIE1931,
name="Observer",
labels=self._labels,
)
sd = SpectralDistribution(DATA_SAMPLE)
domain = sd.domain
range_ = tstack([sd.values, sd.values, sd.values])
self._sample_msds = MultiSpectralDistributions(
range_,
domain,
name="Sample Observer",
labels=self._labels,
)
sd = SpectralDistribution(DATA_SAMPLE_NON_UNIFORM)
domain = sd.domain
range_ = tstack([sd.values, sd.values, sd.values])
self._non_uniform_sample_msds = MultiSpectralDistributions(
range_,
domain,
name="Non Uniform Sample Observer",
strict_name="Strict Non Uniform Sample Observer",
labels=self._labels,
strict_labels=("Strict x_bar", "Strict y_bar", "Strict z_bar"),
)
self._phi = (1 + np.sqrt(5)) / 2
def test_required_attributes(self):
"""Test the presence of required attributes."""
required_attributes = (
"strict_name",
"strict_labels",
"wavelengths",
"values",
"shape",
)
for attribute in required_attributes:
self.assertIn(attribute, dir(MultiSpectralDistributions))
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = (
"__init__",
"interpolate",
"extrapolate",
"align",
"trim",
"normalise",
"to_sds",
)
for method in required_methods:
self.assertIn(method, dir(MultiSpectralDistributions))
def test_strict_name(self):
"""
Test :attr:`colour.colorimetry.spectrum.MultiSpectralDistributions.strict_name`
property.
"""
self.assertEqual(self._sample_msds.strict_name, "Sample Observer")
self.assertEqual(
self._non_uniform_sample_msds.strict_name,
"Strict Non Uniform Sample Observer",
)
def test_wavelengths(self):
"""
Test :attr:`colour.colorimetry.spectrum.MultiSpectralDistributions.wavelengths`
property.
"""
np.testing.assert_array_equal(
self._msds.wavelengths, self._msds.domain
)
msds = self._msds.copy()
msds.wavelengths = msds.wavelengths + 10
np.testing.assert_array_equal(msds.wavelengths, msds.domain)
def test_values(self):
"""
Test :attr:`colour.colorimetry.spectrum.MultiSpectralDistributions.values`
property.
"""
np.testing.assert_array_equal(self._msds.values, self._msds.range)
msds = self._msds.copy()
msds.values = msds.values + 10
np.testing.assert_array_equal(msds.values, msds.range)
def test_strict_labels(self):
"""
Test :attr:`colour.colorimetry.spectrum.MultiSpectralDistributions.\
strict_labels` property.
"""
self.assertTupleEqual(
tuple(self._sample_msds.strict_labels), self._labels
)
self.assertEqual(
tuple(self._non_uniform_sample_msds.strict_labels),
("Strict x_bar", "Strict y_bar", "Strict z_bar"),
)
def test_shape(self):
"""
Test :attr:`colour.colorimetry.spectrum.MultiSpectralDistributions.shape`
property.
"""
self.assertEqual(self._msds.shape, SpectralShape(380, 780, 5))
def test__init__(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.__init__` method.
"""
np.testing.assert_almost_equal(
MultiSpectralDistributions(DATA_CMFS).wavelengths,
MultiSpectralDistributions(
DATA_CMFS.values(),
SpectralShape(380, 780, 5),
).wavelengths,
)
def test_interpolate(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.interpolate` method.
"""
# pylint: disable=E1102
msds = reshape_msds(
self._sample_msds,
SpectralShape(
self._sample_msds.shape.start, self._sample_msds.shape.end, 1
),
"Interpolate",
)
for signal in msds.signals.values():
np.testing.assert_almost_equal(
signal.values, DATA_SAMPLE_INTERPOLATED, decimal=7
)
# TODO: Remove statement whenever we make "Scipy" 0.19.0 the minimum
# version.
# Skipping tests because of "Scipy" 0.19.0 interpolation code changes.
if LooseVersion(scipy.__version__) < LooseVersion("0.19.0"):
return # pragma: no cover
# pylint: disable=E1102
msds = reshape_msds(
self._non_uniform_sample_msds,
SpectralShape(
self._non_uniform_sample_msds.shape.start,
self._non_uniform_sample_msds.shape.end,
1,
),
"Interpolate",
)
for signal in msds.signals.values():
np.testing.assert_allclose(
signal.values,
DATA_SAMPLE_INTERPOLATED_NON_UNIFORM,
rtol=0.0000001,
atol=0.0000001,
)
def test_extrapolate(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.extrapolate` method.
"""
data = dict(zip(range(25, 35), tstack([[0] * 5 + [1] * 5] * 3)))
msds = MultiSpectralDistributions(data)
msds.extrapolate(SpectralShape(10, 50, 5))
np.testing.assert_almost_equal(
msds[10], np.array([0.0, 0.0, 0.0]), decimal=7
)
np.testing.assert_almost_equal(
msds[50], np.array([1.0, 1.0, 1.0]), decimal=7
)
msds = MultiSpectralDistributions(
tstack([np.linspace(0, 1, 10)] * 3), np.linspace(25, 35, 10)
)
msds.extrapolate(
SpectralShape(10, 50, 10),
extrapolator_kwargs={
"method": "Linear",
"left": None,
"right": None,
},
)
np.testing.assert_almost_equal(
msds[10], np.array([-1.5, -1.5, -1.5]), decimal=7
)
np.testing.assert_almost_equal(
msds[50], np.array([2.5, 2.5, 2.5]), decimal=7
)
def test_align(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.align` method.
"""
msds = self._sample_msds.copy()
shape = SpectralShape(100, 900, 5)
self.assertEqual(msds.align(shape).shape, shape)
shape = SpectralShape(600, 650, 1)
self.assertEqual(msds.align(shape).shape, shape)
def test_trim(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.trim` method.
"""
shape = SpectralShape(400, 700, 5)
self.assertEqual(self._msds.copy().trim(shape).shape, shape)
shape = SpectralShape(200, 900, 1)
self.assertEqual(self._msds.copy().trim(shape).shape, self._msds.shape)
def test_normalise(self):
"""
        Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.normalise` method.
"""
np.testing.assert_almost_equal(
self._sample_msds.copy().normalise(100).values,
tstack([DATA_SAMPLE_NORMALISED] * 3),
)
def test_to_sds(self):
"""
Test :func:`colour.colorimetry.spectrum.\
MultiSpectralDistributions.to_sds` method.
"""
sds = self._non_uniform_sample_msds.to_sds()
self.assertEqual(len(sds), 3)
for i, sd in enumerate(sds):
self.assertEqual(sd.name, self._labels[i])
self.assertEqual(sd.strict_name, self._strict_labels[i])
class TestReshapeSd(unittest.TestCase):
"""
Define :func:`colour.colorimetry.spectrum.reshape_sd` definition unit
tests methods.
"""
def test_reshape_sd(self):
"""Test :func:`colour.colorimetry.spectrum.reshape_sd` definition."""
sd = SpectralDistribution(DATA_SAMPLE_ABRIDGED)
sd_reshaped = reshape_sd(sd)
self.assertEqual(sd_reshaped, sd.copy().align(SPECTRAL_SHAPE_DEFAULT))
self.assertEqual(reshape_sd(sd), sd_reshaped)
shape = colour.SpectralShape(100, 900, 1)
extrapolator_kwargs = {
"method": "Constant",
"left": 0.05,
"right": 0.15,
}
sd_reshaped = reshape_sd(
sd,
shape,
method="Extrapolate",
extrapolator_kwargs=extrapolator_kwargs,
)
self.assertEqual(
sd_reshaped,
sd.copy().extrapolate(
shape, extrapolator_kwargs=extrapolator_kwargs
),
)
shape = colour.SpectralShape(400, 700, 1)
interpolator_kwargs = {"fill_value": 0}
sd_reshaped = reshape_sd(
sd,
shape,
method="Interpolate",
interpolator=CubicSplineInterpolator,
interpolator_kwargs=interpolator_kwargs,
)
self.assertEqual(
sd_reshaped,
sd.copy().interpolate(
shape,
interpolator=CubicSplineInterpolator,
interpolator_kwargs=interpolator_kwargs,
),
)
sd = SpectralDistribution(DATA_SAMPLE)
shape = colour.SpectralShape(500, 600, 1)
sd_reshaped = reshape_sd(sd, shape, method="Trim")
self.assertEqual(sd_reshaped, sd.copy().trim(shape))
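        # For reference, the equivalences exercised above reduce to (method
        # names as used in this module):
        #   reshape_sd(sd)                       == sd.align(SPECTRAL_SHAPE_DEFAULT)
        #   reshape_sd(sd, shape, method="Trim") == sd.trim(shape)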
class TestSdsAndMsdsToSds(unittest.TestCase):
"""
Define :func:`colour.colorimetry.spectrum.sds_and_msds_to_sds` definition
unit tests methods.
"""
def test_sds_and_msds_to_sds(self):
"""
Test :func:`colour.colorimetry.spectrum.sds_and_msds_to_sds`
definition.
"""
sd_1 = SpectralDistribution(DATA_SAMPLE_ABRIDGED)
sd_2 = SpectralDistribution(DATA_SAMPLE_ABRIDGED)
multi_sds_1 = MultiSpectralDistributions(DATA_MULTI_SAMPLE_ABRIDGED)
multi_sds_2 = MultiSpectralDistributions(DATA_MULTI_SAMPLE_ABRIDGED)
self.assertEqual(
len(
sds_and_msds_to_sds(
[
sd_1,
sd_2,
multi_sds_1,
multi_sds_2,
]
)
),
8,
)
self.assertEqual(len(sds_and_msds_to_sds(multi_sds_1)), 3)
class TestSdsAndMsdsToMsds(unittest.TestCase):
"""
Define :func:`colour.colorimetry.spectrum.sds_and_msds_to_msds`
definition unit tests methods.
"""
def test_sds_and_msds_to_msds(self):
"""
Test :func:`colour.colorimetry.spectrum.sds_and_msds_to_msds`
definition.
"""
sd_1 = SpectralDistribution(DATA_SAMPLE_ABRIDGED)
sd_2 = SpectralDistribution(DATA_SAMPLE_ABRIDGED)
multi_sds_1 = MultiSpectralDistributions(DATA_MULTI_SAMPLE_ABRIDGED)
multi_sds_2 = MultiSpectralDistributions(DATA_MULTI_SAMPLE_ABRIDGED)
self.assertEqual(sds_and_msds_to_msds(multi_sds_1), multi_sds_1)
multi_sds_0 = sds_and_msds_to_msds([multi_sds_1])
np.testing.assert_array_equal(multi_sds_0.range, multi_sds_1.range)
self.assertEqual(sds_and_msds_to_msds([multi_sds_1]), multi_sds_1)
shape = SpectralShape(500, 560, 10)
self.assertEqual(
sds_and_msds_to_msds([sd_1, sd_2, multi_sds_1, multi_sds_2]).shape,
shape,
)
np.testing.assert_almost_equal(
sds_and_msds_to_msds(
[sd_1, sd_2, multi_sds_1, multi_sds_2]
).wavelengths,
shape.range(),
decimal=7,
)
np.testing.assert_almost_equal(
sds_and_msds_to_msds(
[sd_1, sd_2, multi_sds_1, multi_sds_2]
).values,
tstack(
[sd_1.align(shape).values, sd_2.align(shape).values]
+ [
sd.values
for sd in sds_and_msds_to_sds(multi_sds_1.align(shape))
]
+ [
sd.values
for sd in sds_and_msds_to_sds(multi_sds_2.align(shape))
]
),
decimal=7,
)
if __name__ == "__main__":
unittest.main()
# --- earny-joe/CvDisinfo-Detect :: experiments/src_exp/data_experimentation/test_clean_store_delete_playground.py (MIT) ---
# Comment
import pandas as pd
import re
from google.cloud import storage
from pathlib import Path
def load_data(filename, chunksize=10000):
good_columns = [
'created_at',
'entities',
'favorite_count',
'full_text',
'id_str',
'in_reply_to_screen_name',
'in_reply_to_status_id_str',
'is_quote_status',
'lang',
'retweet_count',
'source',
'user',
'quoted_status_id_str',
'quoted_status_permalink'
]
chunks = pd.read_json(
filename,
lines=True,
chunksize=chunksize,
dtype={
'id_str': str,
'in_reply_to_status_id_str': str,
'quoted_status_id_str': str
}
)
df = pd.concat(chunk for chunk in chunks)[good_columns]
return df
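# Usage sketch (the file name below is a placeholder): chunked reading keeps
# peak memory bounded while still returning a single DataFrame restricted to
# the columns listed above, e.g.
#
#   df = load_data('2020-05-01/2020-05-01_clean-dataset.json', chunksize=5000)
#   print(df.shape)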
def entity_extraction(entity, component, urls=False, user_mentions=False):
try:
if urls is True:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join([url['url'] for url in entity[component]])
elif user_mentions is True:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join(
[mention['screen_name'] for mention
in entity[component]]
)
else:
if entity[component] == []:
return None
elif entity[component] != []:
return ','.join([comp['text'] for comp in entity[component]])
except Exception:
return None
def source_extract(text):
try:
regex = re.compile(r'(?<=>).*?(?=<)', re.I)
return regex.search(text).group()
except AttributeError:
return None
def quoted_status_extract(status):
try:
return status['url']
except Exception:
return None
def clean_panacea_data(dataframe):
user_components = [
'created_at',
'description',
'favourites_count',
'followers_count',
'friends_count',
'id_str',
'location',
'name',
'profile_image_url_https',
'screen_name',
'statuses_count',
'verified'
]
dataframe['hashtags'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'hashtags'))
dataframe['symbols'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'symbols'))
dataframe['urls'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'urls', urls=True))
dataframe['user_mentions'] = dataframe['entities']\
.apply(lambda x: entity_extraction(x, 'user_mentions',
user_mentions=True))
dataframe['tweet_source'] = dataframe['source'].apply(source_extract)
for comp in user_components:
dataframe[f'user_{comp}'] = dataframe['user']\
.apply(lambda user: user[comp])
dataframe['quoted_status_url'] = dataframe['quoted_status_permalink']\
.apply(quoted_status_extract)
dataframe.drop(labels=[
'user',
'entities',
'source',
'quoted_status_permalink'
], axis=1, inplace=True)
dataframe.fillna('none', inplace=True)
return dataframe
def cleaning_wrapper(date):
print('Loading data...')
df = load_data(f'{date}/{date}_clean-dataset.json')
print('Cleaning data...')
df = clean_panacea_data(dataframe=df)
print(f'Cleaned data, converting data for date {date} to pickle format...')
df.to_pickle(f'{date}/{date}_clean-dataset.pkl')
def download_blob(bucket_name, source_blob_name, destination_file_name):
"""Downloads a blob from the bucket."""
# bucket_name = "your-bucket-name"
# source_blob_name = "storage-object-name"
# destination_file_name = "local/path/to/file"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print(f"Blob {source_blob_name} downloaded to {destination_file_name}.")
def upload_blob(bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "your-bucket-name"
# source_file_name = "local/path/to/file"
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print(f"File {source_file_name} uploaded to {destination_blob_name}.")
def main():
date = input('Date whose data will be cleaned (format: YYYY-MM-DD):\n')
bucket_name = 'thepanacealab_covid19twitter'
download_blob(
bucket_name=bucket_name,
        source_blob_name=f'dailies/{date}/panacealab_{date}_clean-dataset.json',
destination_file_name=f'{date}/{date}_clean-dataset.json'
)
cleaning_wrapper(date)
upload_blob(
bucket_name=bucket_name,
source_file_name=f'{date}/{date}_clean-dataset.pkl',
destination_blob_name=f'dailies/{date}/{date}_clean-dataset.pkl'
)
file_delete_path = Path.cwd() / date / f'{date}_clean-dataset.json'
file_delete_path.unlink()
print(f'{date}_clean-dataset.json removed from {date} folder.')
if __name__ == '__main__':
main()
# --- KurmasanaWT/community :: codes/correl.py (MIT) ---
from dash import dcc, html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import yfinance as yf
import math
from sklearn.linear_model import LinearRegression
from app import app
np.seterr(divide='ignore')
pd.options.display.float_format = '{:,.2f}'.format
# FORMAT AND CONFIGURE THE CHARTS
pio.templates["draft"] = go.layout.Template(
layout=go.Layout(
title_x = 0.0,
title_pad = dict(l=10, t=10),
margin = dict(l=50,t=50, b=50, r=50, pad=0, autoexpand=True),
font = dict(family="Arial", size=10),
autosize=True,
),
layout_annotations=[
dict(
name="draft watermark",
text="KWT-Community",
textangle=-30,
opacity=0.03,
font=dict(family="Arial", color="black", size=80),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
]
)
pio.templates.default = "seaborn+draft"
plotres:dict = dict(width=1920, height=1080)
config1 = {
"displaylogo": False,
"toImageButtonOptions": plotres,
"modeBarButtonsToAdd": [
"drawline",
"drawopenpath",
"drawclosedpath",
"drawcircle",
"drawrect",
"eraseshape",
"hoverClosestCartesian",
"hoverCompareCartesian"
]
}
# INPUTS FOR THE DROPDOWN MENUS
tickers = pd.read_csv('db/tickers.csv', delimiter=';')  # assets listed on the Brazilian exchange
tickers['label'] = tickers['value']+" - "+tickers['label']
tickers['value'] = tickers['value']+".SA"
other = pd.read_csv('db/other.csv', delimiter=';')  # other assets and indices
other['label'] = other['value']+" - "+other['label']
tickers=pd.concat([tickers,other])
tickers = tickers.to_dict('records')
periods = pd.read_csv('db/periods.csv', delimiter=';').to_dict('records')  # analysis periods
intervals = pd.read_csv('db/intervals.csv', delimiter=';').to_dict('records')  # sampling intervals within the period
def market_beta(X,Y,N):
"""
X = The independent variable which is the Market
Y = The dependent variable which is the Stock
N = The length of the Window
It returns the alphas and the betas of
the rolling regression
"""
# all the observations
obs = len(X)
# initiate the betas with null values
betas = np.full(obs, np.nan)
# initiate the alphas with null values
alphas = np.full(obs, np.nan)
for i in range((obs-N)):
regressor = LinearRegression()
regressor.fit(X.to_numpy()[i : i + N+1].reshape(-1,1), Y.to_numpy()[i : i + N+1])
betas[i+N] = regressor.coef_[0]
alphas[i+N] = regressor.intercept_
return(alphas, betas)
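# Usage sketch (synthetic series; variable names are placeholders):
#
#   import pandas as pd
#   mkt = pd.Series(np.random.randn(300))
#   stk = pd.Series(0.8 * mkt + 0.01 * np.random.randn(300))
#   alphas, betas = market_beta(mkt, stk, 50)
#   # entries before the first full 50-observation window remain NaN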
# LAYOUT
layout = dbc.Container(
children=[
dcc.Loading(
#className="kwtload",
id="load_o1",
color='#0a0',
style={'background-color':'rgba(0, 0, 0, 0.5)'},
parent_style={},
fullscreen=True,
children=html.Span(id="correl_load_o1", children=["LOADING..."]),
type="default",
),
dbc.Row([
html.Div(className='kwtdrops', children=[
html.H5("ATIVO"), dcc.Dropdown( id="ticker", options=tickers, value='VALE3.SA', clearable=False, style={'width':'300px'} ),
html.H5("BENCHMARK"), dcc.Dropdown( id="indexer", options=tickers, value='^BVSP', clearable=False, style={'width':'300px'} ),
html.H5("PERÍODO"), dcc.Dropdown( id="periods", options=periods, value='1y', clearable=False, style={'width':'10rem'} ),
html.H5("INTERVALO"), dcc.Dropdown( id="intervals", options=intervals, value='1d', clearable=False, style={'width':'10rem'} ),
dbc.Button(className="kwtchartbtn",id='submitb', n_clicks=0, children='Atualizar')
]),
]),
html.Br(),
dbc.Row([
dcc.Graph(id="correl_graph", config=config1),
dcc.Graph(id="correl_graph1", config=config1),
dcc.Graph(id="correl_graph2", config=config1),
dcc.Graph(id="correl_graph3", config=config1),
]),
], fluid=True)
def get():
return html.Div(layout)
####### CALLBACKS #######
####### MARKET PANEL CALLBACK
#
@app.callback(
[ Output("correl_graph", "figure"),
Output("correl_graph1", "figure"),
Output("correl_graph2", "figure"),
Output("correl_graph3", "figure"),
Output("correl_load_o1", "children") ],
[ Input('submitb', 'n_clicks') ],
[ State("ticker", "value"),
State("indexer", "value"),
State("periods", "value"),
State("intervals", "value") ],
)
###### MARKET PANEL CALLBACK FUNCTION
#
def display(sutb, tkr, idx, prd, itv):
per21dd=21
per50dd=50
per200dd=200
    ####### DOWNLOAD PRICE AND VOLUME DATA FOR THE ANALYSED ASSET
df = yf.download(tkr, interval=itv, period=prd)
df = pd.DataFrame(df)
df = df[df.index.dayofweek < 5]
df.dropna(inplace=True)
#df.fillna( method ='ffill', inplace = True)
#df.fillna( method ='bfill', inplace = True)
    ### CHANGE AND RETURN
#df['Return'] = (np.log(df.Close / df.Close.shift(1)))*100
df['Return'] = df.Close.pct_change()
print(df['Return'].isnull().sum())
df.dropna(inplace=True)
print(df['Return'].isnull().sum())
df['PrevClose']=df.Close.shift(1)
df['VarClose']=((df.Close - df.Close.shift(1))/df.Close.shift(1))*100
#df['VarClose'] = df.Close.pct_change()
df['Return21dd'] = (np.log(df.Close / df.Close.shift(per21dd)))*100
df['Return50dd'] = (np.log(df.Close / df.Close.shift(per50dd)))*100
df['Return200dd'] = (np.log(df.Close / df.Close.shift(per200dd)))*100
df['VarAcum'] = ((df.Close/df.Close.iloc[0])-1)*100
df['RetAcum'] = (np.log(df.Close / df.Close.iloc[0]))*100
df["RetAcumColor"] = np.where(df.RetAcum < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 21-DAY SIMPLE MOVING AVERAGE
df['CSMA21dd']=df.Close.rolling(per21dd).mean()
df['RSMA21dd']=((df.Close/df['CSMA21dd'])-1)*100
df["RSMA21dd_Color"] = np.where(df.RSMA21dd < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 50-DAY SIMPLE MOVING AVERAGE
df['CSMA50dd']=df.Close.rolling(per50dd).mean()
df['RSMA50dd']=((df.Close/df['CSMA50dd'])-1)*100
df["RSMA50dd_Color"] = np.where(df.RSMA50dd < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 200-DAY EXPONENTIAL MOVING AVERAGE
df['CEMA200dd']=df.Close.ewm(span=per200dd, min_periods=per200dd, adjust=True).mean()
df['REMA200dd']=((df.Close/df['CEMA200dd'])-1)*100
df["REMA200dd_Color"] = np.where(df.REMA200dd < 0, 'red', 'green')
#print(df['Return'].isnull().sum())
#df.Return
    ####### DOWNLOAD PRICE AND VOLUME DATA FOR THE REFERENCE ASSET (BENCHMARK)
dfi = yf.download(idx, interval=itv, period=prd)
dfi = pd.DataFrame(dfi)
dfi = dfi[dfi.index.dayofweek < 5]
dfi.dropna(inplace=True)
#dfi.fillna( method ='ffill', inplace = True)
#dfi.fillna( method ='bfill', inplace = True)
    ### CHANGE AND RETURN
#dfi['Return'] = (np.log(dfi.Close / dfi.Close.shift(1)))*100
dfi['Return'] = dfi.Close.pct_change()
print(dfi['Return'].isnull().sum())
dfi.dropna(inplace=True)
print(dfi['Return'].isnull().sum())
dfi['PrevClose']=dfi.Close.shift(1)
dfi['VarClose']=((dfi.Close - dfi.Close.shift(1))/dfi.Close.shift(1))*100
#dfi['VarClose'] = dfi.Close.pct_change()
dfi['Return21dd'] = (np.log(dfi.Close / dfi.Close.shift(per21dd)))*100
dfi['Return50dd'] = (np.log(dfi.Close / dfi.Close.shift(per50dd)))*100
dfi['Return200dd'] = (np.log(dfi.Close / dfi.Close.shift(per200dd)))*100
dfi['VarAcum'] = ((dfi.Close/dfi.Close.iloc[0])-1)*100
dfi['RetAcum'] = (np.log(dfi.Close / dfi.Close.iloc[0]))*100
dfi["RetAcumColor"] = np.where(dfi.RetAcum < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 21-DAY SIMPLE MOVING AVERAGE
dfi['CSMA21dd']=dfi.Close.rolling(per21dd).mean()
dfi['RSMA21dd']=((dfi.Close/dfi['CSMA21dd'])-1)*100
dfi["RSMA21dd_Color"] = np.where(dfi.RSMA21dd < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 50-DAY SIMPLE MOVING AVERAGE
dfi['CSMA50dd']=dfi.Close.rolling(per50dd).mean()
dfi['RSMA50dd']=((dfi.Close/dfi['CSMA50dd'])-1)*100
dfi["RSMA50dd_Color"] = np.where(dfi.RSMA50dd < 0, 'red', 'green')
    ### MEAN REVERSION TO THE 200-DAY EXPONENTIAL MOVING AVERAGE
dfi['CEMA200dd']=dfi.Close.ewm(span=per200dd, min_periods=per200dd, adjust=True).mean()
dfi['REMA200dd']=((dfi.Close/dfi['CEMA200dd'])-1)*100
dfi["REMA200dd_Color"] = np.where(dfi.REMA200dd < 0, 'red', 'green')
#print(dfi['Return'].isnull().sum())
#dfi.Return
### ROLLING CORRELATION
df['RCorr21dd'] = df['VarClose'].rolling(per21dd).corr(dfi['VarClose'])
df['RCorr50dd'] = df['VarClose'].rolling(per50dd).corr(dfi['VarClose'])
df['RCorr200dd'] = df['VarClose'].rolling(per200dd).corr(dfi['VarClose'])
    ### RELATIVE RETURN
df['RetComp'] = df['RetAcum'] / dfi['RetAcum']
    ### COMPUTE ALPHA AND BETA
df['Alpha21dd'],df['Beta21dd'] = market_beta(df.Return, dfi.Return, 21)
df['Alpha50dd'],df['Beta50dd'] = market_beta(df.Return, dfi.Return, 50)
df['Alpha200dd'],df['Beta200dd'] = market_beta(df.Return, dfi.Return, 200)
    ####### BUILD THE CHARTS
#
### FIG 0 ---------------------------------------------------------------------------
fig = go.Figure()
fig.add_trace( go.Candlestick (
x=df.index,
open=df.Open,
high=df.High,
low=df.Low,
close=df.Close,
name=tkr) )
fig.add_trace( go.Scatter(x=df.index, y=df.CSMA21dd, mode='lines', name='MMA21', line_width=1,line_color='orange') )
fig.add_trace( go.Scatter(x=df.index, y=df.CSMA50dd, mode='lines', name='MMA50', line_width=1,line_color='navy') )
fig.add_trace( go.Scatter(x=df.index, y=df.CEMA200dd, mode='lines', name='EMA200', line_width=1,line_color='purple') )
### FIG 1 ---------------------------------------------------------------------------
fig1 = make_subplots(
rows=1, cols=2,
column_widths=[.85,.15],
subplot_titles=("", "Histograma (Percent)")
)
fig1.add_trace( go.Scatter(x=df.index, y=df.RSMA21dd, mode='lines', name='R_MMA21', line_width=1, line_color='orange'), col=1, row=1 )
fig1.add_trace( go.Scatter(x=df.index, y=df.RSMA50dd, mode='lines', name='R_MMA50', line_width=1,line_color='navy'), col=1, row=1 )
fig1.add_trace( go.Scatter(x=df.index, y=df.REMA200dd, mode='lines', name='R_EMA200', line_width=1,line_color='purple'), col=1, row=1 )
fig1.add_hline(y=0,
line_color='black', line_dash='dot', line_width=1,
annotation_text="Centro da Média",
annotation_position="bottom left", col=1, row=1)
fig1.add_trace( go.Histogram(x=df.RSMA21dd, name='R_MMA21', histnorm='percent', offsetgroup=0), col=2, row=1 )
fig1.add_trace( go.Histogram(x=df.RSMA50dd, name='R_MMA50', histnorm='percent', offsetgroup=0), col=2, row=1 )
fig1.add_trace( go.Histogram(x=df.REMA200dd, name='R_EMA200', histnorm='percent', offsetgroup=0), col=2, row=1 )
fig1.update_layout(
xaxis=dict(showgrid=False),
xaxis2=dict(showgrid=False)
)
fig1.update_traces(bingroup='overlay', nbinsx=20, opacity=0.5, col=2, row=1, cumulative_enabled=False)
### FIG 2 ---------------------------------------------------------------------------
fig2 = make_subplots(
rows=3, cols=2,
#subplot_titles=("Reversão à Média", "Indicador"),
column_widths=[0.85,.15],
row_heights=[.33, .33, .33],
specs= [
[{'type' : 'xy'}, {'type' : 'indicator'}],
[{'type' : 'xy'}, {'type' : 'indicator'}],
[{'type' : 'xy'}, {'type' : 'indicator'}],
],
#subplot_titles=('Mercedes', 'Ford', 'BMW')
#specs=[
# [{}],
# [{}],
# [{}],
# ]
)
fig2.add_trace( go.Scatter(x=df.index, y=df.RSMA21dd, mode='lines', line_width=1, name='R_MMA21', line_color='orange') , row=1, col=1 ),
fig2.add_trace(
go.Indicator(
mode = "gauge+number+delta",
value = df.RSMA21dd[-1],
#title = {'text': "Reversão MMA21"},
delta = {'reference': df.RSMA21dd.mean(), 'relative': True,'valueformat':'.2%'},
gauge={
'axis':{
'range':[math.floor(df.RSMA21dd.min()),math.ceil(df.RSMA21dd.max())],
'dtick': ( math.ceil(df.RSMA21dd.max()) - math.floor(df.RSMA21dd.min()) )/10,
'tickformat':'0.1f'
},
'steps' : [
{'range': [math.floor(df.RSMA21dd.min()), (math.floor(df.RSMA21dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"},
{'range': [(math.ceil(df.RSMA21dd.max())*0.5), math.ceil(df.RSMA21dd.max())], 'color': "rgba(200,50,50,0.55)"}],
'threshold' : {'line': {'color': "red", 'width': 4},
'thickness': 1,
'value': df.RSMA21dd.mean()},
'bar': {'color': "black"}
}
), row=1, col=2 ),
fig2.add_trace( go.Scatter(x=df.index, y=df.RSMA50dd, mode='lines', line_width=1, name='R_MMA50', line_color='navy') , row=2, col=1 )
fig2.add_trace(
go.Indicator(
mode = "gauge+number+delta",
value = df.RSMA50dd[-1],
#title = {'text': "Reversão MMA50"},
delta = {'reference': df.RSMA50dd.mean(), 'relative': True, 'valueformat':'.2%'},
gauge={
'axis':{
'range':[math.floor(df.RSMA50dd.min()),math.ceil(df.RSMA50dd.max())],
'dtick': ( math.ceil(df.RSMA50dd.max()) - math.floor(df.RSMA50dd.min()) )/10,
'tickformat':'0.1f'
},
'steps' : [
{'range': [math.floor(df.RSMA50dd.min()), (math.floor(df.RSMA50dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"},
{'range': [(math.ceil(df.RSMA50dd.max())*0.5), math.ceil(df.RSMA50dd.max())], 'color': "rgba(200,50,50,0.55)"}],
'threshold' : {'line': {'color': "red", 'width': 4},
'thickness': 1,
'value': df.RSMA50dd.mean()},
'bar': {'color': "black"}
}
), row=2, col=2 ),
fig2.add_trace( go.Scatter(x=df.index, y=df.REMA200dd, mode='lines', line_width=1, name='R_EMA200', line_color='purple') , row=3, col=1 )
fig2.add_trace(
go.Indicator(
mode = "gauge+number+delta",
value = df.REMA200dd[-1],
#title = {'text': "Reversão EMA200"},
delta = {'reference': df.REMA200dd.mean(), 'relative': True, 'valueformat':'.2%'},
gauge={
'axis':{
'range':[math.floor(df.REMA200dd.min()),math.ceil(df.REMA200dd.max())],
'dtick': ( math.ceil(df.REMA200dd.max()) - math.floor(df.REMA200dd.min()) )/10,
'tickformat':'0.1f'
},
'steps' : [
{'range': [math.floor(df.REMA200dd.min()), (math.floor(df.REMA200dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"},
{'range': [(math.ceil(df.REMA200dd.max())*0.5), math.ceil(df.REMA200dd.max())], 'color': "rgba(200,50,50,0.55)"}],
'threshold' : {'line': {'color': "red", 'width': 4},
'thickness': 1,
'value': df.REMA200dd.mean()},
'bar': {'color': "black"}
}
), row=3, col=2 ),
#fig2.add_hline(y=0,
# line_color='black', line_dash='dot', line_width=1,
# annotation_text="Centro da Média",
# annotation_position="bottom left",
# row=1, col=1,)
### FIG 3 ---------------------------------------------------------------------------
fig3 = make_subplots(
rows=1, cols=3,
column_widths=[.33, .33, .33],
subplot_titles=("MÉDIA vs RSMA21dd", "MÉDIA vs RSMA50dd", "MÉDIA vs REMA200dd"),
)
fig3.add_trace( go.Scatter(name='', x=df.RSMA21dd, y=df.CSMA21dd, text=df.index.strftime("%d/%m/%Y"),
mode='markers',
marker=dict(
size=7,
color=df.RSMA21dd, #set color equal to a variable
colorscale='Bluered', # one of plotly colorscales
opacity=0.5,
showscale=False),
hovertemplate = "%{text} <br> RSMA21dd : %{x:.2f} </br> MÉDIA PREÇO : %{y:,.2f}"
), row=1, col=1
)
fig3.add_trace( go.Scatter(name='', x=df.RSMA50dd, y=df.CSMA50dd, text=df.index.strftime("%d/%m/%Y"),
mode='markers',
marker=dict(
size=7,
color=df.RSMA50dd, #set color equal to a variable
colorscale='Bluered', # one of plotly colorscales
opacity=0.5,
showscale=False),
hovertemplate = "%{text} <br> RSMA50dd : %{x:.2f} </br> MÉDIA PREÇO : %{y:,.2f}"
), row=1, col=2
)
fig3.add_trace( go.Scatter(name='', x=df.REMA200dd, y=df.CEMA200dd, text=df.index.strftime("%d/%m/%Y"),
mode='markers',
marker=dict(
size=7,
color=df.REMA200dd, #set color equal to a variable
colorscale='Bluered', # one of plotly colorscales
opacity=0.5,
showscale=False),
hovertemplate = "%{text} <br> REMA200dd : %{x:.2f} </br> MÉDIA PREÇO : %{y:,.2f}"
), row=1, col=3
)
    ####### UPDATE CHART LAYOUT, TRACES AND AXES
#
fig.update_layout( title='<b>EVOLUÇÃO DO PREÇO</b>', xaxis_title='',yaxis_title='<b>Preço</b>', xaxis_rangeslider_visible=False, hovermode='x unified', legend=dict(orientation="h") )
fig1.update_layout(title_text='REVERSÃO À MÉDIA - Agrupado', yaxis_title='<b>Valor</b>', xaxis_rangeslider_visible=False, hovermode='x unified', legend=dict(orientation="h") )
fig2.update_layout(title_text='REVERSÃO À MÉDIA', yaxis_title='<b>Valor</b>', xaxis_rangeslider_visible=False, hovermode='x unified', legend=dict(orientation="h") )
fig3.update_layout( showlegend=False )
fig.update_xaxes( rangebreaks=[ dict(bounds=["sat", "mon"]) ] )
fig1.update_xaxes( rangebreaks=[ dict(bounds=["sat", "mon"]) ] )
fig2.update_xaxes( rangebreaks=[ dict(bounds=["sat", "mon"]) ] )
fig3.update_xaxes( rangebreaks=[ dict(bounds=["sat", "mon"]) ] )
    return fig, fig2, fig1, fig3, ""
# --- Nag94/HomeAssistantConfig :: custom_components/blitzortung/geohash_utils.py (Unlicense) ---
import math
from collections import namedtuple
from . import geohash
Box = namedtuple("Box", ["s", "w", "n", "e"])
def geohash_bbox(gh):
ret = geohash.bbox(gh)
return Box(ret["s"], ret["w"], ret["n"], ret["e"])
def bbox(lat, lon, radius):
lat_delta = radius * 360 / 40000
lon_delta = lat_delta / math.cos(lat * math.pi / 180.0)
return Box(lat - lat_delta, lon - lon_delta, lat + lat_delta, lon + lon_delta)
def overlap(a1, a2, b1, b2):
return a1 < b2 and a2 > b1
def box_overlap(box1: Box, box2: Box):
return overlap(box1.s, box1.n, box2.s, box2.n) and overlap(
box1.w, box1.e, box2.w, box2.e
)
def compute_geohash_tiles(lat, lon, radius, precision):
bounds = bbox(lat, lon, radius)
center = geohash.encode(lat, lon, precision)
stack = set()
checked = set()
stack.add(center)
checked.add(center)
while stack:
current = stack.pop()
for neighbor in geohash.neighbors(current):
if neighbor not in checked and box_overlap(geohash_bbox(neighbor), bounds):
stack.add(neighbor)
checked.add(neighbor)
return checked
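# Usage sketch (coordinates are made up): collect every precision-4 geohash
# tile whose bounding box overlaps a ~25 km box around the point.
#
#   tiles = compute_geohash_tiles(52.52, 13.40, 25, 4)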
def geohash_overlap(lat, lon, radius, max_tiles=9):
result = []
for precision in range(1, 13):
tiles = compute_geohash_tiles(lat, lon, radius, precision)
        if len(tiles) <= max_tiles:
            result = tiles
        else:
            break
return result
# --- TJUMMG/SiamDMU :: deepmask/models/DeepMask.py (Apache-2.0) ---
import torch
import torch.nn as nn
import torchvision
from collections import namedtuple
Config = namedtuple('Config', ['iSz', 'oSz', 'gSz'])
default_config = Config(iSz=160, oSz=56, gSz=112)
class Reshape(nn.Module):
def __init__(self, oSz):
super(Reshape, self).__init__()
self.oSz = oSz
def forward(self, x):
b = x.shape[0]
return x.permute(0, 2, 3, 1).view(b, -1, self.oSz, self.oSz)
class SymmetricPad2d(nn.Module):
def __init__(self, padding):
super(SymmetricPad2d, self).__init__()
self.padding = padding
try:
self.pad_l, self.pad_b, self.pad_r, self.pad_t = padding
except:
self.pad_l, self.pad_b, self.pad_r, self.pad_t = [padding,]*4
def forward(self, input):
assert len(input.shape) == 4, "only Dimension=4 implemented"
h = input.shape[2] + self.pad_t + self.pad_b
w = input.shape[3] + self.pad_l + self.pad_r
assert w >= 1 and h >= 1, "input is too small"
output = torch.zeros(input.shape[0], input.shape[1], h, w).to(input.device)
c_input = input
if self.pad_t < 0:
c_input = c_input.narrow(2, -self.pad_t, c_input.shape[2] + self.pad_t)
if self.pad_b < 0:
c_input = c_input.narrow(2, 0, c_input.shape[2] + self.pad_b)
if self.pad_l < 0:
c_input = c_input.narrow(3, -self.pad_l, c_input.shape[3] + self.pad_l)
if self.pad_r < 0:
c_input = c_input.narrow(3, 0, c_input.shape[3] + self.pad_r)
c_output = output
if self.pad_t > 0:
c_output = c_output.narrow(2, self.pad_t, c_output.shape[2] - self.pad_t)
if self.pad_b > 0:
c_output = c_output.narrow(2, 0, c_output.shape[2] - self.pad_b)
if self.pad_l > 0:
c_output = c_output.narrow(3, self.pad_l, c_output.shape[3] - self.pad_l)
if self.pad_r > 0:
c_output = c_output.narrow(3, 0, c_output.shape[3] - self.pad_r)
c_output.copy_(c_input)
        assert w >= 2*self.pad_l and w >= 2*self.pad_r \
            and h >= 2*self.pad_t and h >= 2*self.pad_b, "input is too small"
for i in range(self.pad_t):
output.narrow(2, self.pad_t-i-1, 1).copy_(output.narrow(2, self.pad_t+i, 1))
for i in range(self.pad_b):
output.narrow(2, output.shape[2] - self.pad_b + i, 1).copy_(
output.narrow(2, output.shape[2] - self.pad_b - i-1, 1))
for i in range(self.pad_l):
output.narrow(3, self.pad_l-i-1, 1).copy_(output.narrow(3, self.pad_l+i, 1))
for i in range(self.pad_r):
output.narrow(3, output.shape[3] - self.pad_r + i, 1).copy_(
output.narrow(3, output.shape[3] - self.pad_r - i-1, 1))
return output
def updatePadding(net, nn_padding):
typename = torch.typename(net)
# print(typename)
if typename.find('Sequential') >= 0 or typename.find('Bottleneck') >= 0:
modules_keys = list(net._modules.keys())
for i in reversed(range(len(modules_keys))):
subnet = net._modules[modules_keys[i]]
out = updatePadding(subnet, nn_padding)
if out != -1:
p = out
in_c, out_c, k, s, _, d, g, b = \
subnet.in_channels, subnet.out_channels, \
subnet.kernel_size[0], subnet.stride[0], \
subnet.padding[0], subnet.dilation[0], \
subnet.groups, subnet.bias,
conv_temple = nn.Conv2d(in_c, out_c, k, stride=s, padding=0,
dilation=d, groups=g, bias=b)
conv_temple.weight = subnet.weight
conv_temple.bias = subnet.bias
if p > 1:
net._modules[modules_keys[i]] = nn.Sequential(SymmetricPad2d(p), conv_temple)
else:
net._modules[modules_keys[i]] = nn.Sequential(nn_padding(p), conv_temple)
else:
if typename.find('torch.nn.modules.conv.Conv2d') >= 0:
k_sz, p_sz = net.kernel_size[0], net.padding[0]
if ((k_sz == 3) or (k_sz == 7)) and p_sz != 0:
return p_sz
return -1
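# Usage sketch: wrap the padded 3x3/7x7 convolutions of any torchvision
# backbone in explicit padding layers (ResNet-18 here is just an example):
#
#   net = torchvision.models.resnet18(pretrained=False)
#   updatePadding(net, nn.ReflectionPad2d)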
class DeepMask(nn.Module):
def __init__(self, config=default_config, context=True):
super(DeepMask, self).__init__()
self.config = config
self.context = context # without context
self.strides = 16
self.fSz = -(-self.config.iSz // self.strides) # ceil div
self.trunk = self.creatTrunk()
updatePadding(self.trunk, nn.ReflectionPad2d)
self.crop_trick = nn.ZeroPad2d(-16//self.strides) # for training
self.maskBranch = self.createMaskBranch()
self.scoreBranch = self.createScoreBranch()
# npt = sum(p.numel() for p in self.trunk.parameters()) / 1e+06
# npm = sum(p.numel() for p in self.maskBranch.parameters()) / 1e+06
# nps = sum(p.numel() for p in self.scoreBranch.parameters()) / 1e+06
# print('| number of paramaters trunk: {:.3f} M'.format(npt))
# print('| number of paramaters mask branch: {:.3f} M'.format(npm))
# print('| number of paramaters score branch: {:.3f} M'.format(nps))
# print('| number of paramaters total: {:.3f} M'.format(npt + nps + npm))
def forward(self, x):
feat = self.trunk(x)
if self.context:
feat = self.crop_trick(feat)
mask = self.maskBranch(feat)
score = self.scoreBranch(feat)
return mask, score
def creatTrunk(self):
resnet50 = torchvision.models.resnet50(pretrained=True)
trunk1 = nn.Sequential(*list(resnet50.children())[:-3])
trunk2 = nn.Sequential(
nn.Conv2d(1024, 128, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 512, self.fSz)
)
return nn.Sequential(trunk1, trunk2)
def createMaskBranch(self):
maskBranch = nn.Sequential(
nn.Conv2d(512, self.config.oSz**2, 1),
Reshape(self.config.oSz),
)
if self.config.gSz > self.config.oSz:
upSample = nn.UpsamplingBilinear2d(size=[self.config.gSz, self.config.gSz])
maskBranch = nn.Sequential(maskBranch, upSample)
return maskBranch
def createScoreBranch(self):
scoreBranch = nn.Sequential(
nn.Dropout(0.5),
nn.Conv2d(512, 1024, 1),
nn.Threshold(0, 1e-6), # do not know why
nn.Dropout(0.5),
nn.Conv2d(1024, 1, 1),
)
return scoreBranch
if __name__ == '__main__':
a = SymmetricPad2d(3)
x = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]])
out = a(x)
print(out)
import torch
Config = namedtuple('Config', ['iSz', 'oSz', 'gSz'])
config = Config(iSz=160, oSz=56, gSz=112)
model = DeepMask(config).cuda()
# training mode
x = torch.rand(32, 3, config.iSz+32, config.iSz+32).cuda()
pred_mask, pred_cls = model(x)
print("Output (training mode)", pred_mask.shape, pred_cls.shape)
# full image testing mode
model.context = False # really important!!
input_size = config.iSz + model.strides * 16 + (model.context * 32)
x = torch.rand(8, 3, input_size, input_size).cuda()
pred_mask, pred_cls = model(x)
print("Output (testing mode)", pred_mask.shape, pred_cls.shape)
# --- filiptronicek/constants :: phi.py (MIT) ---
nums = [0,1]
def calcFi():
n1 = nums[-2]
n2 = nums[-1]
sM = n1 + n2
phi = sM/n2
nums.append(sM)
return (phi)
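# Each call appends the next Fibonacci number to `nums` and returns the ratio
# of consecutive terms, which converges to the golden ratio ~1.6180339887.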
for i in range(45):
if i % 15 == 0 or i == 44:
phi = calcFi()
print(phi)
if i == 44:
with open("outputs/phi.txt", "w") as f:
f.write(str(phi))
else:
        calcFi()
# --- uber-research/var-gp :: var_gp/datasets.py (Apache-2.0) ---
import os
import glob
import torch
import numpy as np
# from PIL import Image, UnidentifiedImageError
from torch.utils.data import Dataset
from torchvision.datasets import MNIST
class ToyDataset(Dataset):
def __init__(self, N_K=50, K=4, X=None, Y=None):
super().__init__()
if X is not None:
self.data, self.targets = X, Y
else:
self.data, self.targets = self._init_data(N_K, K)
self.task_ids = torch.arange(self.targets.size(0))
def _init_data(self, N_K, K):
X1 = torch.cat([
0.8 + 0.4 * torch.randn(N_K, 1),
1.5 + 0.4 * torch.randn(N_K, 1),
], dim=-1)
Y1 = 0 * torch.ones(X1.size(0)).long()
X2 = torch.cat([
0.5 + 0.6 * torch.randn(N_K, 1),
-0.2 - 0.1 * torch.randn(N_K, 1),
], dim=-1)
Y2 = 1 * torch.ones(X2.size(0)).long()
X3 = torch.cat([
2.5 - 0.1 * torch.randn(N_K, 1),
1.0 + 0.6 * torch.randn(N_K, 1),
], dim=-1)
Y3 = 2 * torch.ones(X3.size(0)).long()
X4 = torch.distributions.MultivariateNormal(
torch.Tensor([-0.5, 1.5]),
covariance_matrix=torch.Tensor([[0.2, 0.1], [0.1, 0.1]])).sample(torch.Size([N_K]))
Y4 = 3 * torch.ones(X4.size(0)).long()
X = torch.cat([X1, X2, X3, X4], dim=0)
X[:, 1] -= 1
X[:, 0] -= 0.5
Y = torch.cat([Y1, Y2, Y3, Y4])
return X, Y
def filter_by_class(self, class_list=None):
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def __getitem__(self, index):
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
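# Usage sketch: restrict the toy set to two of the four synthetic clusters.
#
#   ds = ToyDataset(N_K=50)
#   ds.filter_by_class([0, 2])
#   x, y = ds[0]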
class SplitMNIST(MNIST):
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.task_ids = torch.arange(self.targets.size(0))
def filter_by_class(self, class_list=None):
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
self.task_ids = torch.arange(self.targets.size(0))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
class PermutedMNIST(MNIST):
@staticmethod
def create_tasks(n=1):
return [torch.randperm(784) for _ in range(n)]
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.perm = None
def set_task(self, perm):
assert self.perm is None, 'Cannot set task again.'
self.data = self.data[:, perm]
self.perm = perm
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[index], self.targets[index]
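# Usage sketch (the data root is a placeholder):
#
#   perms = PermutedMNIST.create_tasks(n=5)
#   ds = PermutedMNIST('/tmp/mnist', train=True)
#   ds.set_task(perms[0])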
# --- dnetochaves/repense_rh :: apps/ouvertime_record/views.py (MIT) ---
from django.shortcuts import render, HttpResponse
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView, DeleteView, CreateView
from . models import OuverTimeRecord
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.views import View
import json
import csv
import os
from django.conf import settings
from django.contrib.staticfiles import finders
# Import for reportlab
import io
from django.http import FileResponse
from reportlab.pdfgen import canvas
# Import for Xhtm2
from django.template.loader import get_template
from xhtml2pdf import pisa
#import Xlwt
import xlwt
def index(request):
return HttpResponse('ok')
class OuverTimeRecordListView(ListView):
model = OuverTimeRecord
# paginate_by = 100 # if pagination is desired
def get_queryset(self):
logged_company = self.request.user.employee.company.id
queryset = OuverTimeRecord.objects.filter(employee=logged_company)
return queryset
class OuverTimeRecordUpdate(UpdateView):
model = OuverTimeRecord
fields = ['reason', 'hours']
    # Method disabled due to a business-rule change
#def form_valid(self, form):
# obj = form.save(commit=False)
# obj.employee = self.request.user.employee
# obj.save()
# return super(OuverTimeRecordUpdate, self).form_valid(form)
class OuverTimeRecordDelete(DeleteView):
model = OuverTimeRecord
success_url = reverse_lazy('ouvertime_record:ouver-time')
class OuverTimeRecordCreate(CreateView):
model = OuverTimeRecord
fields = ['reason', 'hours']
def form_valid(self, form):
obj = form.save(commit=False)
obj.employee = self.request.user.employee
obj.save()
return super(OuverTimeRecordCreate, self).form_valid(form)
class UtilizouHoraExtra(View):
def post(self, *args, **kwargs):
used = OuverTimeRecord.objects.get(id=kwargs['pk'])
used.used = True
used.save()
employee = self.request.user.employee
response = json.dumps(
{'mensagem': 'Utilizado', 'hours': float(employee.sum_overtime)})
return HttpResponse(response, content_type='application/json')
class CheckedFalse(View):
def post(self, *args, **kwargs):
used = OuverTimeRecord.objects.get(id=kwargs['pk'])
used.used = False
used.save()
employee = self.request.user.employee
response = json.dumps(
{'mensagem': 'Não Utilizado', 'hours': float(employee.sum_overtime)})
return HttpResponse(response, content_type='application/json')
# ReportLab
def some_view(request):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="mypdf.pdf"'
buffer = io.BytesIO()
p = canvas.Canvas(buffer)
p.drawString(200, 810, 'Relatorio de Horas ReportLab')
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
y = 790
for time in times:
p.drawString(10, y, time.reason)
p.drawString(100, y, time.employee.name)
p.drawString(200, y, str(time.hours))
p.drawString(300, y, str(time.used))
y -= 40
p.showPage()
p.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
# Xhtml2
def link_callback(uri, rel):
"""
Convert HTML URIs to absolute system paths so xhtml2pdf can access those
resources
"""
result = finders.find(uri)
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = list(os.path.realpath(path) for path in result)
path = result[0]
else:
sUrl = settings.STATIC_URL # Typically /static/
sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/
mUrl = settings.MEDIA_URL # Typically /media/
mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/
if uri.startswith(mUrl):
path = os.path.join(mRoot, uri.replace(mUrl, ""))
elif uri.startswith(sUrl):
path = os.path.join(sRoot, uri.replace(sUrl, ""))
else:
return uri
# make sure that file exists
if not os.path.isfile(path):
raise Exception(
'media URI must start with %s or %s' % (sUrl, mUrl)
)
return path
def render_pdf_view(request):
template_path = 'ouvertime_record/time_report.html'
cols = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
context = {'cols': cols}
# Create a Django response object, and specify content_type as pdf
response = HttpResponse(content_type='application/pdf')
# response['Content-Disposition'] = 'attachment; filename="report.pdf"'
response['Content-Disposition'] = 'attachment; filename="time-report.pdf"'
# find the template and render it.
template = get_template(template_path)
html = template.render(context)
# create a pdf
pisa_status = pisa.CreatePDF(
html, dest=response, link_callback=link_callback)
# if error then show some funy view
if pisa_status.err:
return HttpResponse('We had some errors <pre>' + html + '</pre>')
return response
class ExportCsv(View):
def get(self, request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="somefilename.csv"'
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
writer = csv.writer(response)
writer.writerow(['Reason', 'Employee', 'Hours', 'Used'])
for time in times:
writer.writerow(
[time.reason, time.employee.name, time.hours, time.used])
return response
# Excel
class ExportExcel(View):
def get(self, request):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="export_excel.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('export_excel')
row_num = 0
columns = ['Reason', 'Employee', 'Hours', 'Used']
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num])
font_style = xlwt.XFStyle()
times = OuverTimeRecord.objects.filter(
employee=request.user.employee.company.id)
row_num = 1
for time in times:
ws.write(row_num, 0, time.reason)
ws.write(row_num, 1, time.employee.name)
ws.write(row_num, 2, time.hours)
ws.write(row_num, 3, time.used)
row_num += 1
wb.save(response)
return response
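# Hypothetical URL wiring for the export views (paths and route names are
# assumptions, not part of this module):
#
#   path('export/csv/', ExportCsv.as_view(), name='export-csv'),
#   path('export/xls/', ExportExcel.as_view(), name='export-excel'),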
# --- alex123012/Bioinf_HW :: second_HW/second_hw_2.py (MIT) ---
import unittest as ut
import time
class test_magick(ut.TestCase):
def test_us(self):
list_r = list(range(0, 100))
for i in list_r:
with self.subTest(case=i):
self.assertEqual(magick(i), i)
def magick(x=None, start=0, stop=100):
yes = ['да', 'д', 'yes', 'y', 'ye']
while (stop >= start):
current_state = (start + stop) // 2
if x is None:
            ans = input(f'Is it true that the secret number is less than {current_state}? ').lower()
if ans in yes:
stop = current_state - 1
else:
start = current_state + 1
elif current_state > x:
stop = current_state - 1
else:
start = current_state + 1
return stop
def main():
    x = float(input('Enter a number: '))
    print('your number:', magick())
print('\n\n')
def test():
start = time.time()
magick(123123123123, 0, 10e100)
print(time.time() - start, '\n')
start = time.time()
magick(123123123123, 0, 10e250)
print(time.time() - start, '\n')
start = time.time()
magick(123123123123, 0, 10e500)
print(time.time() - start, '\n')
if __name__ == '__main__':
    # Run the unit tests without exiting the process, then start the
    # interactive guessing game.
    ut.main(exit=False)
    main()
    # test()
# --- Xtuden-com/tensor2robot :: research/vrgripper/episode_to_transitions.py (Apache-2.0) ---
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Functions for converting env episode data to tfrecords of transitions."""
import collections
import gin
import numpy as np
from PIL import Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
_bytes_feature = (
lambda v: tf.train.Feature(bytes_list=tf.train.BytesList(value=v)))
_int64_feature = (
lambda v: tf.train.Feature(int64_list=tf.train.Int64List(value=v)))
_float_feature = (
lambda v: tf.train.Feature(float_list=tf.train.FloatList(value=v)))
_IMAGE_KEY_PREFIX = 'image'
@gin.configurable
def make_fixed_length(
input_list,
fixed_length,
always_include_endpoints=True,
randomized=True):
"""Create a fixed length list by sampling entries from input_list.
Args:
input_list: The original list we sample entries from.
fixed_length: An integer: the desired length of the output list.
always_include_endpoints: If True, always include the first and last entries
of input_list in the output.
randomized: If True, select entries from input_list by random sampling with
replacement. If False, select entries from input_list deterministically.
Returns:
A list of length fixed_length containing sampled entries of input_list.
"""
original_length = len(input_list)
if original_length <= 2:
return None
if not randomized:
indices = np.sort(np.mod(np.arange(fixed_length), original_length))
return [input_list[i] for i in indices]
if always_include_endpoints:
# Always include entries 0 and N-1.
endpoint_indices = np.array([0, original_length - 1])
# The remaining (fixed_length-2) frames are sampled with replacement
# from entries [1, N-1) of input_list.
other_indices = 1 + np.random.choice(
original_length - 2, fixed_length-2, replace=True)
indices = np.concatenate(
(endpoint_indices, other_indices),
axis=0)
else:
indices = np.random.choice(
original_length, fixed_length, replace=True)
indices = np.sort(indices)
return [input_list[i] for i in indices]
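# Sampling sketch: with randomized=False the indices are
# np.mod(np.arange(fixed_length), len(input_list)), sorted, e.g.
#
#   make_fixed_length(list(range(10)), 12, randomized=False)
#   # -> [0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9]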
@gin.configurable
def episode_to_transitions_reacher(episode_data, is_demo=False):
"""Converts reacher env data to transition examples."""
transitions = []
for i, transition in enumerate(episode_data):
del i
feature_dict = {}
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_dict['pose_t'] = _float_feature(obs_t)
feature_dict['pose_tp1'] = _float_feature(obs_tp1)
feature_dict['action'] = _float_feature(action)
feature_dict['reward'] = _float_feature([reward])
feature_dict['done'] = _int64_feature([int(done)])
feature_dict['is_demo'] = _int64_feature([int(is_demo)])
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
transitions.append(example)
return transitions
@gin.configurable
def episode_to_transitions_metareacher(episode_data):
"""Converts metareacher env data to transition examples."""
context_features = {}
feature_lists = collections.defaultdict(list)
context_features['is_demo'] = _int64_feature(
[int(episode_data[0][-1]['is_demo'])])
context_features['target_idx'] = _int64_feature(
[episode_data[0][-1]['target_idx']])
for i, transition in enumerate(episode_data):
del i
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_lists['pose_t'].append(_float_feature(obs_t))
feature_lists['pose_tp1'].append(_float_feature(obs_tp1))
feature_lists['action'].append(_float_feature(action))
feature_lists['reward'].append(_float_feature([reward]))
feature_lists['done'].append(_int64_feature([int(done)]))
tf_feature_lists = {}
for key in feature_lists:
tf_feature_lists[key] = tf.train.FeatureList(feature=feature_lists[key])
return [tf.train.SequenceExample(
context=tf.train.Features(feature=context_features),
feature_lists=tf.train.FeatureLists(feature_list=tf_feature_lists))]
# --- mwtoews/flopy :: flopy/plot/map.py (CC0-1.0, BSD-3-Clause) ---
import numpy as np
from ..discretization import StructuredGrid, UnstructuredGrid
from ..utils import geometry
try:
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.collections import PathCollection, LineCollection
from matplotlib.path import Path
except (ImportError, ModuleNotFoundError):
plt = None
from . import plotutil
import warnings
warnings.simplefilter("always", PendingDeprecationWarning)
class PlotMapView:
"""
Class to create a map of the model. Delegates plotting
functionality based on model grid type.
Parameters
----------
modelgrid : flopy.discretization.Grid
The modelgrid class can be StructuredGrid, VertexGrid,
or UnstructuredGrid (Default is None)
ax : matplotlib.pyplot axis
The plot axis. If not provided it, plt.gca() will be used.
If there is not a current axis then a new one will be created.
model : flopy.modflow object
flopy model object. (Default is None)
layer : int
Layer to plot. Default is 0. Must be between 0 and nlay - 1.
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
Notes
-----
"""
def __init__(
self, model=None, modelgrid=None, ax=None, layer=0, extent=None
):
if plt is None:
s = (
"Could not import matplotlib. Must install matplotlib "
+ " in order to use ModelMap method"
)
raise ImportError(s)
self.model = model
self.layer = layer
self.mg = None
if modelgrid is not None:
self.mg = modelgrid
elif model is not None:
self.mg = model.modelgrid
else:
err_msg = "A model grid instance must be provided to PlotMapView"
raise AssertionError(err_msg)
if ax is None:
try:
self.ax = plt.gca()
self.ax.set_aspect("equal")
except (AttributeError, ValueError):
                # "axisbg" was removed in matplotlib 2.0; use facecolor
                self.ax = plt.subplot(1, 1, 1, aspect="equal", facecolor="white")
else:
self.ax = ax
if extent is not None:
self._extent = extent
else:
self._extent = None
@property
def extent(self):
if self._extent is None:
self._extent = self.mg.extent
return self._extent
def plot_array(self, a, masked_values=None, **kwargs):
"""
Plot an array. If the array is three-dimensional, then the method
will plot the layer tied to this class (self.layer).
Parameters
----------
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
keyword arguments passed to matplotlib.pyplot.pcolormesh
Returns
-------
quadmesh : matplotlib.collections.QuadMesh or
matplotlib.collections.PatchCollection
"""
if not isinstance(a, np.ndarray):
a = np.array(a)
# Use the model grid to pass back an array of the correct shape
plotarray = self.mg.get_plottable_layer_array(a, self.layer)
# if masked_values are provided mask the plotting array
if masked_values is not None:
for mval in masked_values:
plotarray = np.ma.masked_values(plotarray, mval)
# add NaN values to mask
plotarray = np.ma.masked_where(np.isnan(plotarray), plotarray)
ax = kwargs.pop("ax", self.ax)
# use cached patch collection for plotting
polygons = self.mg.map_polygons
if isinstance(polygons, dict):
polygons = polygons[self.layer]
if len(polygons) == 0:
return
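        # Structured grids expose xvertices/yvertices for pcolormesh;
        # vertex and unstructured grids cache matplotlib Path objects,
        # which are drawn with a PathCollection instead.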
if not isinstance(polygons[0], Path):
collection = ax.pcolormesh(
self.mg.xvertices, self.mg.yvertices, plotarray
)
else:
plotarray = plotarray.ravel()
collection = PathCollection(polygons)
collection.set_array(plotarray)
# set max and min
vmin = kwargs.pop("vmin", None)
vmax = kwargs.pop("vmax", None)
# set matplotlib kwargs
collection.set_clim(vmin=vmin, vmax=vmax)
collection.set(**kwargs)
ax.add_collection(collection)
# set limits
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return collection
def contour_array(self, a, masked_values=None, **kwargs):
"""
Contour an array. If the array is three-dimensional, then the method
will contour the layer tied to this class (self.layer).
Parameters
----------
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
keyword arguments passed to matplotlib.pyplot.pcolormesh
Returns
-------
contour_set : matplotlib.pyplot.contour
"""
try:
import matplotlib.tri as tri
except ImportError:
err_msg = "matplotlib must be installed to use contour_array()"
raise ImportError(err_msg)
a = np.copy(a)
if not isinstance(a, np.ndarray):
a = np.array(a)
# Use the model grid to pass back an array of the correct shape
plotarray = self.mg.get_plottable_layer_array(a, self.layer)
# work around for tri-contour ignore vmin & vmax
# necessary block for tri-contour NaN issue
if "levels" not in kwargs:
vmin = kwargs.pop("vmin", np.nanmin(plotarray))
vmax = kwargs.pop("vmax", np.nanmax(plotarray))
levels = np.linspace(vmin, vmax, 7)
kwargs["levels"] = levels
# workaround for tri-contour nan issue
# use -2**31 to allow for 32 bit int arrays
plotarray[np.isnan(plotarray)] = -(2 ** 31)
if masked_values is None:
masked_values = [-(2 ** 31)]
else:
masked_values = list(masked_values)
if -(2 ** 31) not in masked_values:
masked_values.append(-(2 ** 31))
ismasked = None
if masked_values is not None:
for mval in masked_values:
if ismasked is None:
ismasked = np.isclose(plotarray, mval)
else:
t = np.isclose(plotarray, mval)
ismasked += t
ax = kwargs.pop("ax", self.ax)
if "colors" in kwargs.keys():
if "cmap" in kwargs.keys():
kwargs.pop("cmap")
plot_triplot = False
if "plot_triplot" in kwargs:
plot_triplot = kwargs.pop("plot_triplot")
# Get vertices for the selected layer
xcentergrid = self.mg.get_xcellcenters_for_layer(self.layer)
ycentergrid = self.mg.get_ycellcenters_for_layer(self.layer)
if "extent" in kwargs:
extent = kwargs.pop("extent")
idx = (
(xcentergrid >= extent[0])
& (xcentergrid <= extent[1])
& (ycentergrid >= extent[2])
& (ycentergrid <= extent[3])
)
plotarray = plotarray[idx]
xcentergrid = xcentergrid[idx]
ycentergrid = ycentergrid[idx]
plotarray = plotarray.flatten()
xcentergrid = xcentergrid.flatten()
ycentergrid = ycentergrid.flatten()
triang = tri.Triangulation(xcentergrid, ycentergrid)
if ismasked is not None:
ismasked = ismasked.flatten()
mask = np.any(
np.where(ismasked[triang.triangles], True, False), axis=1
)
triang.set_mask(mask)
contour_set = ax.tricontour(triang, plotarray, **kwargs)
if plot_triplot:
ax.triplot(triang, color="black", marker="o", lw=0.75)
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return contour_set
def plot_inactive(self, ibound=None, color_noflow="black", **kwargs):
"""
Make a plot of inactive cells. If not specified, then pull ibound
from the self.ml
Parameters
----------
ibound : numpy.ndarray
ibound array to plot. (Default is ibound in 'BAS6' package.)
color_noflow : string
(Default is 'black')
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if ibound is None:
if self.mg.idomain is None:
raise AssertionError("Ibound/Idomain array must be provided")
ibound = self.mg.idomain
plotarray = np.zeros(ibound.shape, dtype=int)
idx1 = ibound == 0
plotarray[idx1] = 1
plotarray = np.ma.masked_equal(plotarray, 0)
cmap = matplotlib.colors.ListedColormap(["0", color_noflow])
bounds = [0, 1, 2]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_ibound(
self,
ibound=None,
color_noflow="black",
color_ch="blue",
color_vpt="red",
**kwargs
):
"""
Make a plot of ibound. If not specified, then pull ibound from the
self.ml
Parameters
----------
ibound : numpy.ndarray
ibound array to plot. (Default is ibound in the modelgrid)
color_noflow : string
(Default is 'black')
color_ch : string
Color for constant heads (Default is 'blue'.)
color_vpt: string
Color for vertical pass through cells (Default is 'red')
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if ibound is None:
if self.model is not None:
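                # In MODFLOW 6 a negative idomain marks a vertical
                # pass-through cell rather than a constant head, so
                # swap in the pass-through color.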
if self.model.version == "mf6":
color_ch = color_vpt
if self.mg.idomain is None:
raise AssertionError("Ibound/Idomain array must be provided")
ibound = self.mg.idomain
plotarray = np.zeros(ibound.shape, dtype=int)
idx1 = ibound == 0
idx2 = ibound < 0
plotarray[idx1] = 1
plotarray[idx2] = 2
plotarray = np.ma.masked_equal(plotarray, 0)
cmap = matplotlib.colors.ListedColormap(["0", color_noflow, color_ch])
bounds = [0, 1, 2, 3]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_grid(self, **kwargs):
"""
Plot the grid lines.
Parameters
----------
kwargs : ax, colors. The remaining kwargs are passed into the
the LineCollection constructor.
Returns
-------
lc : matplotlib.collections.LineCollection
"""
from matplotlib.collections import PatchCollection
ax = kwargs.pop("ax", self.ax)
colors = kwargs.pop("colors", "grey")
colors = kwargs.pop("color", colors)
colors = kwargs.pop("ec", colors)
colors = kwargs.pop("edgecolor", colors)
grid_lines = self.mg.grid_lines
if isinstance(grid_lines, dict):
grid_lines = grid_lines[self.layer]
collection = LineCollection(grid_lines, colors=colors, **kwargs)
ax.add_collection(collection)
ax.set_xlim(self.extent[0], self.extent[1])
ax.set_ylim(self.extent[2], self.extent[3])
return collection
def plot_bc(
self,
name=None,
package=None,
kper=0,
color=None,
plotAll=False,
**kwargs
):
"""
Plot boundary conditions locations for a specific boundary
type from a flopy model
Parameters
----------
name : string
Package name string ('WEL', 'GHB', etc.). (Default is None)
package : flopy.modflow.Modflow package class instance
flopy package class instance. (Default is None)
kper : int
Stress period to plot
color : string
matplotlib color string. (Default is None)
plotAll : bool
Boolean used to specify that boundary condition locations for all
layers will be plotted on the current ModelMap layer.
(Default is False)
**kwargs : dictionary
keyword arguments passed to matplotlib.collections.PatchCollection
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
if "ftype" in kwargs and name is None:
name = kwargs.pop("ftype")
# Find package to plot
if package is not None:
p = package
name = p.name[0]
elif self.model is not None:
if name is None:
raise Exception("ftype not specified")
name = name.upper()
p = self.model.get_package(name)
else:
raise Exception("Cannot find package to plot")
# trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention
if isinstance(p, list) or p.parent.version == "mf6":
if not isinstance(p, list):
p = [p]
idx = np.array([])
for pp in p:
if pp.package_type in ("lak", "sfr", "maw", "uzf"):
t = plotutil.advanced_package_bc_helper(pp, self.mg, kper)
else:
try:
mflist = pp.stress_period_data.array[kper]
except Exception as e:
raise Exception(
"Not a list-style boundary package: " + str(e)
)
if mflist is None:
return
t = np.array(
[list(i) for i in mflist["cellid"]], dtype=int
).T
if len(idx) == 0:
idx = np.copy(t)
else:
idx = np.append(idx, t, axis=1)
else:
# modflow-2005 structured and unstructured grid
if p.package_type in ("uzf", "lak"):
idx = plotutil.advanced_package_bc_helper(p, self.mg, kper)
else:
try:
mflist = p.stress_period_data[kper]
except Exception as e:
raise Exception(
"Not a list-style boundary package: " + str(e)
)
if mflist is None:
return
if len(self.mg.shape) == 3:
idx = [mflist["k"], mflist["i"], mflist["j"]]
else:
idx = mflist["node"]
nlay = self.mg.nlay
plotarray = np.zeros(self.mg.shape, dtype=int)
if plotAll and len(self.mg.shape) > 1:
pa = np.zeros(self.mg.shape[1:], dtype=int)
pa[tuple(idx[1:])] = 1
for k in range(nlay):
plotarray[k] = pa.copy()
else:
plotarray[tuple(idx)] = 1
# mask the plot array
plotarray = np.ma.masked_equal(plotarray, 0)
# set the colormap
if color is None:
# modflow 6 ftype fix, since multiple packages append _0, _1, etc:
key = name[:3].upper()
if key in plotutil.bc_color_dict:
c = plotutil.bc_color_dict[key]
else:
c = plotutil.bc_color_dict["default"]
else:
c = color
cmap = matplotlib.colors.ListedColormap(["0", c])
bounds = [0, 1, 2]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# create normalized quadmesh or patch object depending on grid type
quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs)
return quadmesh
def plot_shapefile(self, shp, **kwargs):
"""
Plot a shapefile. The shapefile must be in the same coordinates as
the rotated and offset grid.
Parameters
----------
shp : string or pyshp shapefile object
Name of the shapefile to plot
kwargs : dictionary
Keyword arguments passed to plotutil.plot_shapefile()
"""
return self.plot_shapes(shp, **kwargs)
def plot_shapes(self, obj, **kwargs):
"""
Plot shapes is a method that facilitates plotting a collection
of geospatial objects
Parameters
----------
obj : collection object
obj can accept the following types
str : shapefile name
shapefile.Reader object
list of [shapefile.Shape, shapefile.Shape,]
shapefile.Shapes object
flopy.utils.geometry.Collection object
list of [flopy.utils.geometry, ...] objects
geojson.GeometryCollection object
geojson.FeatureCollection object
shapely.GeometryCollection object
list of [[vertices], ...]
kwargs : dictionary
keyword arguments passed to plotutil.plot_shapefile()
Returns
-------
matplotlib.Collection object
"""
ax = kwargs.pop("ax", self.ax)
patch_collection = plotutil.plot_shapefile(obj, ax, **kwargs)
return patch_collection
def plot_cvfd(self, verts, iverts, **kwargs):
"""
Plot a cvfd grid. The vertices must be in the same
coordinates as the rotated and offset grid.
Parameters
----------
verts : ndarray
2d array of x and y points.
iverts : list of lists
should be of len(ncells) with a list of vertex number for each cell
kwargs : dictionary
Keyword arguments passed to plotutil.plot_cvfd()
"""
warnings.warn(
"plot_cvfd will be deprecated and will be removed in version "
"3.3.5. Use plot_grid or plot_array",
PendingDeprecationWarning,
)
a = kwargs.pop("a", None)
if a is None:
return self.plot_grid(**kwargs)
else:
return self.plot_array(a, **kwargs)
def contour_array_cvfd(self, vertc, a, masked_values=None, **kwargs):
"""
Contour a cvfd array. If the array is three-dimensional,
then the method will contour the layer tied to this class (self.layer).
The vertices must be in the same coordinates as the rotated and
offset grid.
Parameters
----------
vertc : np.ndarray
Array with of size (nc, 2) with centroid location of cvfd
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
**kwargs : dictionary
keyword arguments passed to matplotlib.pyplot.pcolormesh
Returns
-------
contour_set : matplotlib.pyplot.contour
"""
warnings.warn(
"contour_cvfd will be deprecated and removed in version 3.3.5. "
" Use contour_array",
PendingDeprecationWarning,
)
return self.contour_array(a, masked_values=masked_values, **kwargs)
def plot_vector(
self,
vx,
vy,
istep=1,
jstep=1,
normalize=False,
masked_values=None,
**kwargs
):
"""
Plot a vector.
Parameters
----------
vx : np.ndarray
x component of the vector to be plotted (non-rotated)
array shape must be (nlay, nrow, ncol) for a structured grid
array shape must be (nlay, ncpl) for a unstructured grid
vy : np.ndarray
y component of the vector to be plotted (non-rotated)
array shape must be (nlay, nrow, ncol) for a structured grid
array shape must be (nlay, ncpl) for a unstructured grid
istep : int
row frequency to plot (default is 1)
jstep : int
column frequency to plot (default is 1)
normalize : bool
boolean flag used to determine if vectors should be normalized
using the vector magnitude in each cell (default is False)
masked_values : iterable of floats
values to mask
kwargs : matplotlib.pyplot keyword arguments for the
plt.quiver method
Returns
-------
quiver : matplotlib.pyplot.quiver
result of the quiver function
"""
pivot = kwargs.pop("pivot", "middle")
ax = kwargs.pop("ax", self.ax)
# get ibound array to mask inactive cells
ib = np.ones((self.mg.nnodes,), dtype=int)
if self.mg.idomain is not None:
ib = self.mg.idomain.ravel()
xcentergrid = self.mg.get_xcellcenters_for_layer(self.layer)
ycentergrid = self.mg.get_ycellcenters_for_layer(self.layer)
vx = self.mg.get_plottable_layer_array(vx, self.layer)
vy = self.mg.get_plottable_layer_array(vy, self.layer)
ib = self.mg.get_plottable_layer_array(ib, self.layer)
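        # Structured grids return 2-D arrays (slice rows and columns);
        # unstructured grids return 1-D arrays, so fall back to a
        # single-axis slice.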
try:
x = xcentergrid[::istep, ::jstep]
y = ycentergrid[::istep, ::jstep]
u = vx[::istep, ::jstep]
v = vy[::istep, ::jstep]
ib = ib[::istep, ::jstep]
except IndexError:
x = xcentergrid[::jstep]
y = ycentergrid[::jstep]
u = vx[::jstep]
v = vy[::jstep]
ib = ib[::jstep]
# if necessary, copy to avoid changing the passed values
if masked_values is not None or normalize:
u = np.copy(u)
v = np.copy(v)
# mask values
if masked_values is not None:
for mval in masked_values:
to_mask = np.logical_or(u == mval, v == mval)
u[to_mask] = np.nan
v[to_mask] = np.nan
# normalize
if normalize:
vmag = np.sqrt(u ** 2.0 + v ** 2.0)
idx = vmag > 0.0
u[idx] /= vmag[idx]
v[idx] /= vmag[idx]
u[ib == 0] = np.nan
v[ib == 0] = np.nan
# rotate and plot, offsets must be zero since
# these are vectors not locations
urot, vrot = geometry.rotate(u, v, 0.0, 0.0, self.mg.angrot_radians)
quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs)
return quiver
def plot_specific_discharge(
self, spdis, istep=1, jstep=1, normalize=False, **kwargs
):
"""
DEPRECATED. Use plot_vector() instead, which should follow after
postprocessing.get_specific_discharge().
Method to plot specific discharge from discharge vectors
provided by the cell by cell flow output file. In MODFLOW-6
this option is controled in the NPF options block. This method
uses matplotlib quiver to create a matplotlib plot of the output.
Parameters
----------
spdis : np.recarray
specific discharge recarray from cbc file
istep : int
row frequency to plot. (Default is 1.)
jstep : int
column frequency to plot. (Default is 1.)
normalize : bool
boolean flag used to determine if discharge vectors should
be normalized using the magnitude of the specific discharge in each
cell. (default is False)
kwargs : matplotlib.pyplot keyword arguments for the
plt.quiver method.
Returns
-------
quiver : matplotlib.pyplot.quiver
quiver plot of discharge vectors
"""
warnings.warn(
"plot_specific_discharge() has been deprecated and will be "
"removed in version 3.3.5. Use plot_vector() instead, which "
"should follow after postprocessing.get_specific_discharge()",
DeprecationWarning,
)
if isinstance(spdis, list):
print(
"Warning: Selecting the final stress period from Specific"
" Discharge list"
)
spdis = spdis[-1]
nodes = self.mg.nnodes
qx = np.zeros(nodes)
qy = np.zeros(nodes)
idx = np.array(spdis["node"]) - 1
qx[idx] = spdis["qx"]
qy[idx] = spdis["qy"]
return self.plot_vector(qx, qy, istep, jstep, normalize, **kwargs)
def plot_discharge(
self,
frf=None,
fff=None,
flf=None,
head=None,
istep=1,
jstep=1,
normalize=False,
**kwargs
):
"""
DEPRECATED. Use plot_vector() instead, which should follow after
postprocessing.get_specific_discharge().
Use quiver to plot vectors.
Parameters
----------
frf : numpy.ndarray
MODFLOW's 'flow right face'
fff : numpy.ndarray
MODFLOW's 'flow front face'
flf : numpy.ndarray
MODFLOW's 'flow lower face' (Default is None.)
head : numpy.ndarray
MODFLOW's head array. If not provided, then will assume confined
conditions in order to calculated saturated thickness.
istep : int
row frequency to plot. (Default is 1.)
jstep : int
column frequency to plot. (Default is 1.)
normalize : bool
boolean flag used to determine if discharge vectors should
be normalized using the magnitude of the specific discharge in each
cell. (default is False)
kwargs : dictionary
Keyword arguments passed to plt.quiver()
Returns
-------
quiver : matplotlib.pyplot.quiver
Vectors of specific discharge.
"""
warnings.warn(
"plot_discharge() has been deprecated and will be replaced "
"in version 3.3.5. Use plot_vector() instead, which should "
"follow after postprocessing.get_specific_discharge()",
DeprecationWarning,
)
if self.mg.grid_type != "structured":
err_msg = "Use plot_specific_discharge for " "{} grids".format(
self.mg.grid_type
)
raise NotImplementedError(err_msg)
else:
if self.mg.top is None:
err = (
"StructuredGrid must have top and "
"botm defined to use plot_discharge()"
)
raise AssertionError(err)
delr = self.mg.delr
delc = self.mg.delc
top = np.copy(self.mg.top)
botm = np.copy(self.mg.botm)
laytyp = None
hnoflo = 999.0
hdry = 999.0
laycbd = None
if self.model is not None:
if self.model.laytyp is not None:
laytyp = self.model.laytyp
if self.model.hnoflo is not None:
hnoflo = self.model.hnoflo
if self.model.hdry is not None:
hdry = self.model.hdry
if self.model.laycbd is not None:
laycbd = self.model.laycbd
if laycbd is not None and 1 in laycbd:
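                # drop quasi-3D confining-bed layers from botm so the
                # saturated thickness is computed over model layers only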
active = np.ones((botm.shape[0],), dtype=int)
kon = 0
for cbd in laycbd:
if cbd > 0:
kon += 1
active[kon] = 0
botm = botm[active == 1]
# If no access to head or laytyp, then calculate confined saturated
# thickness by setting laytyp to zeros
if head is None or laytyp is None:
head = np.zeros(botm.shape, np.float32)
laytyp = np.zeros((botm.shape[0],), dtype=int)
# calculate the saturated thickness
sat_thk = plotutil.PlotUtilities.saturated_thickness(
head, top, botm, laytyp, [hnoflo, hdry]
)
# Calculate specific discharge
qx, qy, qz = plotutil.PlotUtilities.centered_specific_discharge(
frf, fff, flf, delr, delc, sat_thk
)
return self.plot_vector(qx, qy, istep, jstep, normalize, **kwargs)
def plot_pathline(self, pl, travel_time=None, **kwargs):
"""
Plot the MODPATH pathlines.
Parameters
----------
pl : list of rec arrays or a single rec array
rec array or list of rec arrays is data returned from
modpathfile PathlineFile get_data() or get_alldata()
methods. Data in rec array is 'x', 'y', 'z', 'time',
'k', and 'particleid'.
travel_time : float or str
travel_time is a travel time selection for the displayed
pathlines. If a float is passed then pathlines with times
less than or equal to the passed time are plotted. If a
string is passed a variety logical constraints can be added
in front of a time value to select pathlines for a select
period of time. Valid logical constraints are <=, <, >=, and
>. For example, to select all pathlines less than 10000 days
travel_time='< 10000' would be passed to plot_pathline.
(default is None)
kwargs : layer, ax, colors. The remaining kwargs are passed
into the LineCollection constructor. If layer='all',
pathlines are output for all layers
Returns
-------
lc : matplotlib.collections.LineCollection
"""
from matplotlib.collections import LineCollection
# make sure pathlines is a list
if not isinstance(pl, list):
pl = [pl]
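        # resolve the layer filter: 'all' (mapped to -1) keeps pathline
        # segments from every layer, otherwise only the selected layer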
if "layer" in kwargs:
kon = kwargs.pop("layer")
if isinstance(kon, bytes):
kon = kon.decode()
if isinstance(kon, str):
if kon.lower() == "all":
kon = -1
else:
kon = self.layer
else:
kon = self.layer
marker = kwargs.pop("marker", None)
markersize = kwargs.pop("markersize", None)
markersize = kwargs.pop("ms", markersize)
markercolor = kwargs.pop("markercolor", None)
markerevery = kwargs.pop("markerevery", 1)
ax = kwargs.pop("ax", self.ax)
if "colors" not in kwargs:
kwargs["colors"] = "0.5"
linecol = []
markers = []
for p in pl:
tp = plotutil.filter_modpath_by_travel_time(p, travel_time)
# transform data!
x0r, y0r = geometry.transform(
tp["x"],
tp["y"],
self.mg.xoffset,
self.mg.yoffset,
self.mg.angrot_radians,
)
# build polyline array
arr = np.vstack((x0r, y0r)).T
# select based on layer
if kon >= 0:
kk = p["k"].copy().reshape(p.shape[0], 1)
kk = np.repeat(kk, 2, axis=1)
arr = np.ma.masked_where((kk != kon), arr)
else:
arr = np.ma.asarray(arr)
# append line to linecol if there is some unmasked segment
if not arr.mask.all():
linecol.append(arr)
if marker is not None:
for xy in arr[::markerevery]:
if not np.all(xy.mask):
markers.append(xy)
# create line collection
lc = None
if len(linecol) > 0:
lc = LineCollection(linecol, **kwargs)
ax.add_collection(lc)
if marker is not None:
markers = np.array(markers)
ax.plot(
markers[:, 0],
markers[:, 1],
lw=0,
marker=marker,
color=markercolor,
ms=markersize,
)
return lc
def plot_timeseries(self, ts, travel_time=None, **kwargs):
"""
Plot the MODPATH timeseries.
Parameters
----------
ts : list of rec arrays or a single rec array
rec array or list of rec arrays is data returned from
modpathfile TimeseriesFile get_data() or get_alldata()
methods. Data in rec array is 'x', 'y', 'z', 'time',
'k', and 'particleid'.
travel_time : float or str
travel_time is a travel time selection for the displayed
pathlines. If a float is passed then pathlines with times
less than or equal to the passed time are plotted. If a
string is passed a variety logical constraints can be added
in front of a time value to select pathlines for a select
period of time. Valid logical constraints are <=, <, >=, and
>. For example, to select all pathlines less than 10000 days
travel_time='< 10000' would be passed to plot_pathline.
(default is None)
kwargs : layer, ax, colors. The remaining kwargs are passed
into the LineCollection constructor. If layer='all',
pathlines are output for all layers
Returns
-------
lo : list of Line2D objects
"""
if "color" in kwargs:
kwargs["markercolor"] = kwargs["color"]
return self.plot_pathline(ts, travel_time=travel_time, **kwargs)
def plot_endpoint(
self,
ep,
direction="ending",
selection=None,
selection_direction=None,
**kwargs
):
"""
Plot the MODPATH endpoints.
Parameters
----------
ep : rec array
A numpy recarray with the endpoint particle data from the
MODPATH 6 endpoint file
direction : str
String defining if starting or ending particle locations should be
considered. (default is 'ending')
selection : tuple
tuple that defines the zero-base layer, row, column location
(l, r, c) to use to make a selection of particle endpoints.
The selection could be a well location to determine capture zone
for the well. If selection is None, all particle endpoints for
            the user-specified direction will be plotted. (default is None)
selection_direction : str
String defining is a selection should be made on starting or
ending particle locations. If selection is not None and
selection_direction is None, the selection direction will be set
to the opposite of direction. (default is None)
kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The
remaining kwargs are passed into the matplotlib scatter
method. If colorbar is True a colorbar will be added to the plot.
If colorbar_label is passed in and colorbar is True then
colorbar_label will be passed to the colorbar set_label()
method. If shrink is passed in and colorbar is True then
the colorbar size will be set using shrink.
Returns
-------
sp : matplotlib.pyplot.scatter
"""
ax = kwargs.pop("ax", self.ax)
tep, _, xp, yp = plotutil.parse_modpath_selection_options(
ep, direction, selection, selection_direction
)
# scatter kwargs that users may redefine
if "c" not in kwargs:
c = tep["time"] - tep["time0"]
else:
c = np.empty((tep.shape[0]), dtype="S30")
c.fill(kwargs.pop("c"))
s = kwargs.pop("s", np.sqrt(50))
s = float(kwargs.pop("size", s)) ** 2.0
# colorbar kwargs
createcb = kwargs.pop("colorbar", False)
colorbar_label = kwargs.pop("colorbar_label", "Endpoint Time")
shrink = float(kwargs.pop("shrink", 1.0))
# transform data!
x0r, y0r = geometry.transform(
tep[xp],
tep[yp],
self.mg.xoffset,
self.mg.yoffset,
self.mg.angrot_radians,
)
# build array to plot
arr = np.vstack((x0r, y0r)).T
# plot the end point data
sp = ax.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs)
# add a colorbar for travel times
if createcb:
cb = plt.colorbar(sp, ax=ax, shrink=shrink)
cb.set_label(colorbar_label)
return sp
class DeprecatedMapView(PlotMapView):
"""
Deprecation handler for the PlotMapView class
Parameters
----------
model : flopy.modflow.Modflow object
modelgrid : flopy.discretization.Grid object
ax : matplotlib.pyplot.axes object
layer : int
model layer to plot, default is layer 1
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
"""
def __init__(
self, model=None, modelgrid=None, ax=None, layer=0, extent=None
):
super().__init__(
model=model, modelgrid=modelgrid, ax=ax, layer=layer, extent=extent
)
def plot_discharge(
self,
frf,
fff,
dis=None,
flf=None,
head=None,
istep=1,
jstep=1,
normalize=False,
**kwargs
):
"""
Use quiver to plot vectors. Deprecated method that uses
the old function call to pass the method to PlotMapView
Parameters
----------
frf : numpy.ndarray
MODFLOW's 'flow right face'
fff : numpy.ndarray
MODFLOW's 'flow front face'
dis : flopy.modflow.ModflowDis package
            Deprecated parameter
flf : numpy.ndarray
MODFLOW's 'flow lower face' (Default is None.)
head : numpy.ndarray
MODFLOW's head array. If not provided, then will assume confined
conditions in order to calculated saturated thickness.
istep : int
row frequency to plot. (Default is 1.)
jstep : int
column frequency to plot. (Default is 1.)
normalize : bool
boolean flag used to determine if discharge vectors should
be normalized using the magnitude of the specific discharge in each
cell. (default is False)
kwargs : dictionary
Keyword arguments passed to plt.quiver()
Returns
-------
quiver : matplotlib.pyplot.quiver
Vectors of specific discharge.
"""
if dis is not None:
self.mg = plotutil._depreciated_dis_handler(
modelgrid=self.mg, dis=dis
)
        return super().plot_discharge(
            frf=frf,
            fff=fff,
            flf=flf,
            head=head,
            istep=istep,
            jstep=jstep,
            normalize=normalize,
            **kwargs
        )
class ModelMap:
"""
DEPRECATED. ModelMap acts as a PlotMapView factory
object. Please migrate to PlotMapView for plotting
functionality and future code compatibility
Parameters
----------
sr : flopy.utils.reference.SpatialReference
The spatial reference class (Default is None)
ax : matplotlib.pyplot axis
The plot axis. If not provided it, plt.gca() will be used.
If there is not a current axis then a new one will be created.
model : flopy.modflow object
flopy model object. (Default is None)
dis : flopy.modflow.ModflowDis object
flopy discretization object. (Default is None)
layer : int
Layer to plot. Default is 0. Must be between 0 and nlay - 1.
xul : float
x coordinate for upper left corner
yul : float
y coordinate for upper left corner. The default is the sum of the
delc array.
rotation : float
Angle of grid rotation around the upper left corner. A positive value
indicates clockwise rotation. Angles are in degrees.
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
length_multiplier : float
scaling factor for conversion from model units to another unit
length base ex. ft to m.
Notes
-----
ModelMap must know the position and rotation of the grid in order to make
the plot. This information is contained in the SpatialReference class
(sr), which can be passed. If sr is None, then it looks for sr in dis.
If dis is None, then it looks for sr in model.dis. If all of these
arguments are none, then it uses xul, yul, and rotation. If none of these
arguments are provided, then it puts the lower-left-hand corner of the
grid at (0, 0).
"""
def __new__(
cls,
sr=None,
ax=None,
model=None,
dis=None,
layer=0,
extent=None,
xul=None,
yul=None,
xll=None,
yll=None,
rotation=None,
length_multiplier=None,
):
from ..utils.reference import SpatialReferenceUnstructured
err_msg = (
"ModelMap is deprecated and has been replaced by "
"PlotMapView(). ModelMap will be removed in version 3.3.5; "
"Calling PlotMapView()"
)
warnings.warn(err_msg, DeprecationWarning)
modelgrid = None
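        # rebuild a modern Grid instance from the legacy SpatialReference
        # and dis inputs before delegating to DeprecatedMapView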
if model is not None:
if (xul, yul, xll, yll, rotation) != (
None,
None,
None,
None,
None,
):
modelgrid = plotutil._set_coord_info(
model.modelgrid, xul, yul, xll, yll, rotation
)
elif sr is not None:
if length_multiplier is not None:
sr.length_multiplier = length_multiplier
if (xul, yul, xll, yll, rotation) != (
None,
None,
None,
None,
None,
):
sr.set_spatialreference(xul, yul, xll, yll, rotation)
if isinstance(sr, SpatialReferenceUnstructured):
if dis is not None:
modelgrid = UnstructuredGrid(
vertices=sr.verts,
iverts=sr.iverts,
xcenters=sr.xc,
ycenters=sr.yc,
top=dis.top.array,
botm=dis.botm.array,
ncpl=sr.ncpl,
)
else:
modelgrid = UnstructuredGrid(
vertices=sr.verts,
iverts=sr.iverts,
xcenters=sr.xc,
ycenters=sr.yc,
ncpl=sr.ncpl,
)
elif dis is not None:
modelgrid = StructuredGrid(
delc=sr.delc,
delr=sr.delr,
top=dis.top.array,
botm=dis.botm.array,
xoff=sr.xll,
yoff=sr.yll,
angrot=sr.rotation,
)
else:
modelgrid = StructuredGrid(
delc=sr.delc,
delr=sr.delr,
xoff=sr.xll,
yoff=sr.yll,
angrot=sr.rotation,
)
else:
pass
return DeprecatedMapView(
model=model, modelgrid=modelgrid, ax=ax, layer=layer, extent=extent
)
| 33.110037 | 79 | 0.545663 | 44,078 | 0.989783 | 0 | 0 | 134 | 0.003009 | 0 | 0 | 20,906 | 0.46945 |
6f7e5bc7a0c9e42e5042391f78e81e9e318258c0 | 9,342 | py | Python | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | ["BSD-3-Clause"] | 1 | 2021-12-22T22:39:49.000Z | 2021-12-22T22:39:49.000Z | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | ["BSD-3-Clause"] | null | null | null | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
import argparse
from pathlib import Path
import json
import random
import string
from botocore.exceptions import ClientError
module_info = {
"name": "rds__explore_snapshots",
"author": "Alexander Morgenstern [email protected]",
"category": "EXFIL",
"one_liner": "Creates copies of running RDS databases to access protected information",
"description": "Creates a snapshot of all database instances, restores new database instances from those snapshots, and then changes the master password to allow access to the copied database. After the database has been created, the connection information is given. After interactions with the database are complete, the temporary resources are deleted. If there is an unexpected crash during the module's execution, the subsequent run of the module will attempt to clean up any leftover temporary resources.",
"services": ["RDS"],
"prerequisite_modules": [],
"external_dependencies": [],
"arguments_to_autocomplete": ["--regions"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
parser.add_argument(
"--regions",
required=False,
default=None,
help="One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.",
)
TEMP_FILE = Path(__file__).parent / "temp.json"
WAIT_CONFIG = {"Delay": 10}
def mark_temp(resource):
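    # Record a temporary snapshot/instance in temp.json so a later run can
    # clean it up if this one crashes before deleting it.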
if "DBInstanceArn" in resource:
key = "Instances"
identifier = resource["DBInstanceArn"]
else:
key = "Snapshots"
identifier = resource["DBSnapshotArn"]
data = read_temp()
data[key][identifier] = resource
write_temp(data)
def remove_temp(resource):
if "DBInstanceArn" in resource:
key = "Instances"
identifier = resource["DBInstanceArn"]
else:
key = "Snapshots"
identifier = resource["DBSnapshotArn"]
data = read_temp()
del data[key][identifier]
write_temp(data)
def read_temp():
with TEMP_FILE.open("r") as infile:
data = json.load(infile)
return data
def write_temp(data):
with TEMP_FILE.open("w") as outfile:
json.dump(data, outfile, default=str)
def cleanup(pacu):
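    # Delete any temporary instances/snapshots left over from a previous
    # crashed run, using the regions recorded in temp.json.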
data = read_temp()
success = True
for instance in data["Instances"]:
client = pacu.get_boto3_client(
"rds", data["Instances"][instance]["AvailabilityZone"][:-1]
)
if not delete_instance(client, instance, pacu.print):
success = False
for snapshot in data["Snapshots"]:
client = pacu.get_boto3_client(
"rds", data["Snapshots"][snapshot]["AvailabilityZone"][:-1]
)
if not delete_snapshot(client, snapshot, pacu.print):
success = False
return success
def main(args, pacu):
"""Main module function, called from Pacu"""
args = parser.parse_args(args)
if args.regions:
regions = args.regions.split(",")
else:
regions = pacu.get_regions("rds")
if not cleanup(pacu):
if pacu.input(" Cleanup Failed. Continue? (y/n) ") != "y":
return {"fail": "Failed to delete temporary data."}
summary_data = {"instances": 0}
for region in regions:
pacu.print("Region: {}".format(region))
client = pacu.get_boto3_client("rds", region)
pacu.print(" Getting RDS instances...")
active_instances = get_all_region_instances(client, pacu.print)
pacu.print(" Found {} RDS instance(s)".format(len(active_instances)))
for instance in active_instances:
prompt = " Target: {} (y/n)? ".format(instance["DBInstanceIdentifier"])
if pacu.input(prompt).lower() != "y":
continue
pacu.print(" Creating temporary snapshot...")
temp_snapshot = create_snapshot_from_instance(client, instance, pacu.print)
if not temp_snapshot:
pacu.print(" Failed to create temporary snapshot")
continue
pacu.print(" Restoring temporary instance from snapshot...")
temp_instance = restore_instance_from_snapshot(
client, temp_snapshot, pacu.print
)
if not temp_instance:
pacu.print(" Failed to create temporary instance")
delete_snapshot(client, temp_snapshot, pacu.print)
continue
process_instance(pacu, client, temp_instance)
pacu.print(" Deleting temporary resources...")
delete_instance(client, temp_instance, pacu.print)
delete_snapshot(client, temp_snapshot, pacu.print)
summary_data["instances"] += 1
if not cleanup(pacu):
summary_data["fail"] = "Failed to delete temporary data."
return summary_data
def process_instance(pacu, client, instance):
waiter = client.get_waiter("db_instance_available")
waiter.wait(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
password = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(20)
)
pacu.print(" Master Password for current instance: {}".format(password))
if modify_master_password(client, instance, password, pacu.print):
pacu.print(" Password Change Successful")
else:
pacu.print(" Password Change Failed")
response = client.describe_db_instances(
DBInstanceIdentifier=instance["DBInstanceIdentifier"]
)
endpoint = response["DBInstances"][0]["Endpoint"]
pacu.print(" Connection Information:")
pacu.print(" Address: {}".format(endpoint["Address"]))
pacu.print(" Port: {}".format(endpoint["Port"]))
pacu.input(" Press enter to process next instance...")
def modify_master_password(client, instance, password, print):
try:
client.modify_db_instance(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
MasterUserPassword=password,
)
return True
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return False
def restore_instance_from_snapshot(client, snapshot, print):
waiter = client.get_waiter("db_snapshot_available")
waiter.wait(
DBSnapshotIdentifier=snapshot["DBSnapshotIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
try:
response = client.restore_db_instance_from_db_snapshot(
DBInstanceIdentifier=snapshot["DBSnapshotIdentifier"],
DBSnapshotIdentifier=snapshot["DBSnapshotIdentifier"],
)
mark_temp(response["DBInstance"])
return response["DBInstance"]
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return {}
def delete_snapshot(client, snapshot, print):
waiter = client.get_waiter("db_snapshot_available")
waiter.wait(
DBSnapshotIdentifier=snapshot["DBSnapshotIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
try:
response = client.delete_db_snapshot(
DBSnapshotIdentifier=snapshot["DBSnapshotIdentifier"]
)
remove_temp(response["DBSnapshot"])
return True
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return False
def delete_instance(client, instance, print):
waiter = client.get_waiter("db_instance_available")
waiter.wait(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
try:
response = client.delete_db_instance(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
SkipFinalSnapshot=True,
)
remove_temp(response["DBInstance"])
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return False
waiter = client.get_waiter("db_instance_deleted")
waiter.wait(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
return True
def create_snapshot_from_instance(client, instance, print):
waiter = client.get_waiter("db_instance_available")
waiter.wait(
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
WaiterConfig=WAIT_CONFIG,
)
try:
response = client.create_db_snapshot(
DBSnapshotIdentifier=instance["DBInstanceIdentifier"] + "-copy",
DBInstanceIdentifier=instance["DBInstanceIdentifier"],
)
mark_temp(response["DBSnapshot"])
return response["DBSnapshot"]
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return {}
def get_all_region_instances(client, print):
out = []
paginator = client.get_paginator("describe_db_instances")
pages = paginator.paginate()
try:
for page in pages:
out.extend(page["DBInstances"])
return out
except ClientError as error:
print(" " + error.response["Error"]["Code"])
return []
def summary(data, pacu_main):
if "fail" in data:
out = data["fail"] + "\n"
else:
out = " No issues cleaning up temporary data\n"
out += " {} Copy Instance(s) Launched".format(data["instances"])
return out
| 34.988764 | 515 | 0.650717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,771 | 0.296617 |
6f7fca20b708629aa57b407552bb18e0a01540bc | 9,846 | py | Python | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | ["Apache-2.0"] | null | null | null | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | ["Apache-2.0"] | null | null | null | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | ["Apache-2.0"] | null | null | null |
"""
Executes the automation part of 1DMin
"""
import statistics
import autofile
from autorun import run_script
from mechroutines.trans._routines import _geom as geom
from mechroutines.trans._routines import _gather as gather
from mechroutines.trans.runner import lj as lj_runner
from mechlib import filesys
from mechlib import amech_io
from mechlib.amech_io import printer as ioprinter
def onedmin(spc_name,
spc_dct, thy_dct, etrans_keyword_dct,
run_prefix, save_prefix):
""" Run the task
"""
bath_name = etrans_keyword_dct['bath']
tgt_dct, bath_dct = spc_dct[spc_name], spc_dct[bath_name]
tgt_info = filesys.inf.get_spc_info(tgt_dct)
bath_info = filesys.inf.get_spc_info(bath_dct)
lj_info = filesys.inf.combine_spc_info(tgt_info, bath_info)
# Build the modified thy objs
inp_thy_info = filesys.inf.get_es_info(
etrans_keyword_dct['inplvl'], thy_dct)
run_thy_info = filesys.inf.get_es_info(
etrans_keyword_dct['runlvl'], thy_dct)
tgt_mod_thy_info = filesys.inf.modify_orb_restrict(
tgt_info, inp_thy_info)
bath_mod_thy_info = filesys.inf.modify_orb_restrict(
bath_info, inp_thy_info)
lj_mod_thy_info = filesys.inf.modify_orb_restrict(
lj_info, run_thy_info)
# Build the target conformer filesystem objects
tgt_cnf_run_fs, tgt_cnf_save_fs = filesys.build_fs(
run_prefix, save_prefix, 'CONFORMER',
spc_locs=tgt_info, thy_locs=tgt_mod_thy_info[1:])
tgt_loc_info = filesys.mincnf.min_energy_conformer_locators(
tgt_cnf_save_fs, tgt_mod_thy_info)
tgt_min_cnf_locs, tgt_cnf_save_path = tgt_loc_info
# Create run fs if that directory has been deleted to run the jobs
tgt_cnf_run_fs[-1].create(tgt_min_cnf_locs)
tgt_cnf_run_path = tgt_cnf_run_fs[-1].path(tgt_min_cnf_locs)
# Get options from the dct or es options lst
# tgt_cnf_run_fs[-1].create(tgt_min_cnf_locs)
# tgt_cnf_run_path = filesys.build.cnf_paths_from_locs(
# tgt_cnf_run_fs, [tgt_min_cnf_locs])[0]
# Build the target energy transfer filesystem objects
etrans_run_fs = autofile.fs.energy_transfer(tgt_cnf_run_path)
etrans_save_fs = autofile.fs.energy_transfer(tgt_cnf_save_path)
etrans_locs = bath_info + lj_mod_thy_info[1:4]
    # Build the bath conformer filesystem objects (used by _runlj below),
    # mirroring the target conformer filesystem construction above
    _, bath_cnf_save_fs = filesys.build_fs(
        run_prefix, save_prefix, 'CONFORMER',
        spc_locs=bath_info, thy_locs=bath_mod_thy_info[1:])
# Calculate and save the Lennard-Jones parameters, if needed
run_needed, nsamp_needed = _need_run(
etrans_save_fs, etrans_locs, etrans_keyword_dct)
if run_needed:
_runlj(nsamp_needed,
lj_info, lj_mod_thy_info,
tgt_mod_thy_info, bath_mod_thy_info,
tgt_cnf_save_fs, bath_cnf_save_fs,
etrans_run_fs, etrans_locs,
etrans_keyword_dct)
_savelj(etrans_run_fs, etrans_save_fs, etrans_locs,
etrans_keyword_dct)
else:
epath = etrans_save_fs[-1].file.lennard_jones_epsilon.path(etrans_locs)
spath = etrans_save_fs[-1].file.lennard_jones_sigma.path(etrans_locs)
ioprinter.info_message(
'- Lennard-Jones epsilon found at path {}'.format(epath))
ioprinter.info_message(
'- Lennard-Jones sigma found at path {}'.format(spath))
def _need_run(etrans_save_fs, etrans_locs, etrans_keyword_dct):
""" Check if job needs to run
"""
nsamp = etrans_keyword_dct['nsamp']
overwrite = etrans_keyword_dct['overwrite']
ex1 = etrans_save_fs[-1].file.lennard_jones_epsilon.exists(etrans_locs)
ex2 = etrans_save_fs[-1].file.lennard_jones_sigma.exists(etrans_locs)
if not ex1 or not ex2:
ioprinter.info_message(
'Either no Lennard-Jones epsilon or sigma found in',
'save filesys. Running OneDMin for params...')
run = True
nsamp_need = nsamp
elif overwrite:
ioprinter.info_message(
'User specified to overwrite parameters with new run...')
run = True
nsamp_need = nsamp
else:
inf_obj = etrans_save_fs[-1].file.info.read(etrans_locs)
nsampd = inf_obj.nsamp
        # run only if more samples are requested than have been saved
        if nsamp > nsampd:
            run = True
            nsamp_need = nsamp - nsampd
else:
run = False
nsamp_need = 0
return run, nsamp_need
def _runlj(nsamp_needed,
lj_info, lj_mod_thy_info,
tgt_mod_thy_info, bath_mod_thy_info,
tgt_cnf_save_fs, bath_cnf_save_fs,
etrans_run_fs, etrans_locs,
etrans_keyword_dct):
""" Run the Lennard-Jones parameters
"""
# Pull stuff from dct
njobs = etrans_keyword_dct['njobs']
smin = etrans_keyword_dct['smin']
smax = etrans_keyword_dct['smax']
conf = etrans_keyword_dct['conf']
# Determine the number of samples per job
nsamp_per_job = nsamp_needed // njobs
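    # (integer division: any remainder of nsamp_needed is dropped)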
# Set the path to the executable
onedmin_exe_path = '/lcrc/project/CMRP/amech/OneDMin/build'
# Obtain the geometry for the target and bath
tgt_geo = geom.get_geometry(
tgt_cnf_save_fs, tgt_mod_thy_info, conf=conf)
bath_geo = geom.get_geometry(
bath_cnf_save_fs, bath_mod_thy_info, conf=conf)
# Set the path to the etrans lead fs
etrans_run_path = etrans_run_fs[-1].path(etrans_locs)
# Build the run directory
onedmin_run_path = lj_runner.build_rundir(etrans_run_path)
# Run an instancw of 1DMin for each processor
for idx in range(njobs):
# Build run directory
onedmin_job_path = lj_runner.make_jobdir(onedmin_run_path, idx)
# Write the input files
xyz1_str, xyz2_str = lj_runner.write_xyz(tgt_geo, bath_geo)
elstruct_inp_str, elstruct_sub_str = lj_runner.write_elstruct_inp(
lj_info, lj_mod_thy_info)
onedmin_str = lj_runner.write_input(
nsamp_per_job, smin=smin, smax=smax,
target_name='target.xyz', bath_name='bath.xyz')
input_strs = (
xyz1_str, xyz2_str,
elstruct_inp_str, elstruct_sub_str,
onedmin_str)
input_names = (
'target.xyz', 'bath.xyz',
'qc.mol', 'ene.x',
'input.dat')
inp = tuple(zip(input_strs, input_names))
        amech_io.writer.write_files(
            inp, onedmin_job_path, exe_names=('ene.x',))
# Write the batch submission script for each instance
onedmin_sub_str = lj_runner.write_onedmin_sub(
njobs, onedmin_run_path, onedmin_exe_path,
exe_name='onedmin-dd-molpro.x')
sub_inp = ((onedmin_sub_str, 'build.sh'),)
    amech_io.writer.write_files(
        sub_inp, onedmin_run_path, exe_names=('build.sh',))
# Submit the all of the OneDMin jobs
ioprinter.info_message('Running each OneDMin job...', newline=2)
run_script(onedmin_sub_str, onedmin_run_path)
def _savelj(etrans_run_fs, etrans_save_fs, etrans_locs,
etrans_keyword_dct):
""" Save the Lennard-Jones parameters
"""
# Read the dictionary
ljpotential = etrans_keyword_dct['pot']
# Set the run path to read the files
etrans_run_path = etrans_run_fs[-1].path(etrans_locs)
# Read any epsilons and sigma currently in the filesystem
ioprinter.info_message(
'Reading Lennard-Jones parameters and Geoms from filesystem...',
newline=1)
fs_geoms, fs_epsilons, fs_sigmas = gather.read_filesys(
etrans_save_fs, etrans_locs)
gather.print_lj_parms(fs_sigmas, fs_epsilons)
# Read the lj from all the output files
ioprinter.info_message(
'Reading Lennard-Jones parameters and Geoms from output...',
newline=1)
run_geoms, run_epsilons, run_sigmas = gather.read_output(etrans_run_path)
gather.print_lj_parms(run_sigmas, run_epsilons)
# Read the program and version for onedmin
prog_version = gather.prog_version(etrans_run_path)
# Add the lists from the two together
geoms = fs_geoms + run_geoms
sigmas = fs_sigmas + run_sigmas
epsilons = fs_epsilons + run_epsilons
# Average the sigma and epsilon values
if geoms and sigmas and epsilons:
assert len(geoms) == len(sigmas) == len(epsilons), (
'Number of geoms, sigmas, and epsilons not the same'
)
avg_sigma = statistics.mean(sigmas)
avg_epsilon = statistics.mean(epsilons)
nsampd = len(sigmas)
ioprinter.info_message(
'Average Sigma to save [unit]:', avg_sigma, newline=1)
        ioprinter.info_message('Average Epsilon to save [unit]:', avg_epsilon)
ioprinter.info_message('Number of values = ', nsampd)
# Update the trajectory file
traj = []
for geo, eps, sig in zip(geoms, epsilons, sigmas):
comment = 'Epsilon: {} Sigma: {}'.format(eps, sig)
traj.append((comment, geo))
# Write the info obj
inf_obj = autofile.schema.info_objects.lennard_jones(
nsampd, potential=ljpotential,
program='OneDMin', version=prog_version)
# Set up the electronic structure input file
onedmin_inp_str = '<ONEDMIN INP>'
els_inp_str = '<ELSTRUCT INP>'
# Write the params to the save file system
etrans_save_fs[-1].file.lj_input.write(onedmin_inp_str, etrans_locs)
etrans_save_fs[-1].file.info.write(inf_obj, etrans_locs)
etrans_save_fs[-1].file.molpro_inp_file.write(els_inp_str, etrans_locs)
etrans_save_fs[-1].file.epsilon.write(avg_epsilon, etrans_locs)
etrans_save_fs[-1].file.sigma.write(avg_sigma, etrans_locs)
etrans_save_fs[1].file.trajectory.write(traj, etrans_locs)
| 36.065934 | 79 | 0.683628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,521 | 0.256043 |
6f801ba5e156e09ae80a405057d4699d1492e731 | 7,182 | py | Python | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | ["MIT"] | null | null | null | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | ["MIT"] | null | null | null | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | ["MIT"] | null | null | null |
from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException
import pandas as pd
import requests
from StringIO import StringIO
def detect_file_extension(filename):
"""Extract and return the extension of a file given a filename.
Args:
filename (str): name of the file
Returns:
str: extension of the file
Raises:
BarryFileException: if extension not present in filename
"""
if filename is None:
raise BarryFileException("Input file name cannot be None")
split_filename = filename.split(".")
if len(split_filename) > 1:
return str(split_filename[-1]).lower()
else:
raise BarryFileException("Could not determine input file type from file extension")
def xls_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLS file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
        # Check if column names have been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
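        # (pandas semantics: header=None means the data has no header row,
        # header=0 means the first row read is the header)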
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def xlsx_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLSX file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
        # Check if column names have been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def csv_to_df(filename, skip_rows, skip_header, columns):
"""Converts a CSV file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
        # Check if column names have been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_csv(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def url_to_df(url, skip_rows, skip_header, columns):
"""Converts a CSV from HTTP URL to Pandas dataframe.
Args:
url (str): http url of the csv
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
        # Check if column names have been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
url_content = requests.get(url).content
return pd.read_csv(StringIO(url_content), skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def df_to_xls(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_xlsx(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
filename (str): name of the file
Raises:
BarryExportException: if file cannot be converted to dataframe
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_json(df, out_filename):
"""Writes a Pandas dataframe to a JSON file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_json(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_csv(df, out_filename):
"""Writes a Pandas dataframe to a CSV file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_csv(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def sort_df(df, sort_column, ascending):
"""Sort a DataFrame with the column name passed in ascending/descending order.
Args:
df (dataframe): dataframe that needs to be sorted
sort_column (str): column to be sorted on
ascending (bool): sort order, ascending if True, descending if False
Returns:
dataframe: a pandas dataframe
Raises:
BarryDFException: if there is any error while sorting the dataframe
"""
try:
return df.sort(columns=sort_column, ascending=ascending)
except Exception as e:
raise BarryDFException("Could not sort dataframe on columns %s" % (sort_column))
| 31.362445 | 107 | 0.657059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,006 | 0.557783 |
6f812a049e2bbe774645e522bf2bfebedf410de7 | 451 | py | Python | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | ["BSD-3-Clause"] | 1 | 2020-11-29T13:00:48.000Z | 2020-11-29T13:00:48.000Z | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | ["BSD-3-Clause"] | null | null | null | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | ["BSD-3-Clause"] | null | null | null |
from flask_script import Command
from app import db
class SeedCommand(Command):
""" Seed the DB."""
def run(self):
if (
input(
"Are you sure you want to drop all tables and recreate? (y/N)\n"
).lower() == "y"
):
print("Dropping tables...")
db.drop_all()
db.create_all()
db.session.commit()
print("DB successfully seeded.")
| 22.55 | 80 | 0.501109 | 395 | 0.875831 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.290466 |
6f83b0761d99f7aecd9538ff15dd5acb4d03288c | 118 | py | Python | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | ["MIT"] | null | null | null | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | ["MIT"] | null | null | null | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | ["MIT"] | null | null | null |
a = [2, 4, 5, 7, 8, 9]
sum = 0
for i in range(len(a)):  # range(len(a) - 1) would skip the last element
if a[i] % 2 == 0:
sum = sum + a[i]
print(sum)
| 14.75 | 27 | 0.415254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6f8670b2eab9bbcfcba5a2bac0b023bbc676fc76 | 8,724 | py | Python | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | ["MIT"] | null | null | null | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | ["MIT"] | 10 | 2020-10-19T06:47:45.000Z | 2021-06-25T15:41:34.000Z | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Core client, used for all API requests.
"""
import os
import platform
from collections import namedtuple
from plivo.base import ResponseObject
from plivo.exceptions import (AuthenticationError, InvalidRequestError,
PlivoRestError, PlivoServerError,
ResourceNotFoundError, ValidationError)
from plivo.resources import (Accounts, Addresses, Applications, Calls,
Conferences, Endpoints, Identities, Messages,
Numbers, Pricings, Recordings, Subaccounts)
from plivo.resources.live_calls import LiveCalls
from plivo.resources.queued_calls import QueuedCalls
from plivo.utils import is_valid_mainaccount, is_valid_subaccount
from plivo.version import __version__
from requests import Request, Session
AuthenticationCredentials = namedtuple('AuthenticationCredentials',
'auth_id auth_token')
PLIVO_API = 'https://api.plivo.com'
PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account'])
def get_user_agent():
return 'plivo-python/%s (Python: %s)' % (__version__,
platform.python_version())
def fetch_credentials(auth_id, auth_token):
"""Fetches the right credentials either from params or from environment"""
if not (auth_id and auth_token):
try:
auth_id = os.environ['PLIVO_AUTH_ID']
auth_token = os.environ['PLIVO_AUTH_TOKEN']
except KeyError:
raise AuthenticationError('The Plivo Python SDK '
'could not find your auth credentials.')
if not (is_valid_mainaccount(auth_id) or is_valid_subaccount(auth_id)):
raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
class Client(object):
def __init__(self, auth_id=None, auth_token=None, proxies=None, timeout=5):
"""
The Plivo API client.
Deals with all the API requests to be made.
"""
self.base_uri = PLIVO_API_BASE_URI
self.session = Session()
self.session.headers.update({
'User-Agent': get_user_agent(),
'Content-Type': 'application/json',
'Accept': 'application/json',
})
self.session.auth = fetch_credentials(auth_id, auth_token)
self.multipart_session = Session()
self.multipart_session.headers.update({
'User-Agent': get_user_agent(),
'Cache-Control': 'no-cache',
})
self.multipart_session.auth = fetch_credentials(auth_id, auth_token)
self.proxies = proxies
self.timeout = timeout
self.account = Accounts(self)
self.subaccounts = Subaccounts(self)
self.applications = Applications(self)
self.calls = Calls(self)
self.live_calls = LiveCalls(self)
self.queued_calls = QueuedCalls(self)
self.conferences = Conferences(self)
self.endpoints = Endpoints(self)
self.messages = Messages(self)
self.numbers = Numbers(self)
self.pricing = Pricings(self)
self.recordings = Recordings(self)
self.addresses = Addresses(self)
self.identities = Identities(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
self.multipart_session.close()
def process_response(self,
method,
response,
response_type=None,
objects_type=None):
"""Processes the API response based on the status codes and method used
to access the API
"""
try:
response_json = response.json(
object_hook=
lambda x: ResponseObject(x) if isinstance(x, dict) else x)
if response_type:
r = response_type(self, response_json.__dict__)
response_json = r
if 'objects' in response_json and objects_type:
response_json.objects = [
objects_type(self, obj.__dict__)
for obj in response_json.objects
]
except ValueError:
response_json = None
if response.status_code == 400:
if response_json and 'error' in response_json:
raise ValidationError(response_json.error)
raise ValidationError(
'A parameter is missing or is invalid while accessing resource'
'at: {url}'.format(url=response.url))
if response.status_code == 401:
if response_json and 'error' in response_json:
raise AuthenticationError(response_json.error)
raise AuthenticationError(
'Failed to authenticate while accessing resource at: '
'{url}'.format(url=response.url))
if response.status_code == 404:
if response_json and 'error' in response_json:
raise ResourceNotFoundError(response_json.error)
raise ResourceNotFoundError(
'Resource not found at: {url}'.format(url=response.url))
if response.status_code == 405:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'HTTP method "{method}" not allowed to access resource at: '
'{url}'.format(method=method, url=response.url))
if response.status_code == 500:
if response_json and 'error' in response_json:
raise PlivoServerError(response_json.error)
raise PlivoServerError(
'A server error occurred while accessing resource at: '
'{url}'.format(url=response.url))
if method == 'DELETE':
if response.status_code != 204:
raise PlivoRestError('Resource at {url} could not be '
'deleted'.format(url=response.url))
elif response.status_code not in [200, 201, 202]:
raise PlivoRestError(
'Received status code {status_code} for the HTTP method '
'"{method}"'.format(
status_code=response.status_code, method=method))
return response_json
def create_request(self, method, path=None, data=None):
path = path or []
req = Request(method, '/'.join([self.base_uri, self.session.auth[0]] +
list([str(p) for p in path])) + '/',
**({
'params': data
} if method == 'GET' else {
'json': data
}))
return self.session.prepare_request(req)
def create_multipart_request(self,
method,
path=None,
data=None,
files=None):
path = path or []
data_args = {}
if method == 'GET':
data_args['params'] = data
else:
data_args['data'] = data
if files and 'file' in files and files['file'] != '':
data_args['files'] = files
req = Request(method,
'/'.join([self.base_uri, self.multipart_session.auth[0]]
+ list([str(p) for p in path])) + '/', **(
data_args))
return self.multipart_session.prepare_request(req)
def send_request(self, request, **kwargs):
if 'session' in kwargs:
session = kwargs['session']
del kwargs['session']
else:
session = self.session
return session.send(
request, proxies=self.proxies, timeout=self.timeout, **kwargs)
def request(self,
method,
path=None,
data=None,
response_type=None,
objects_type=None,
files=None,
**kwargs):
if files is not None:
req = self.create_multipart_request(method, path, data, files)
session = self.multipart_session
else:
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
res = self.send_request(req, **kwargs)
return self.process_response(method, res, response_type, objects_type)
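# Usage sketch (illustrative only; the auth values below are placeholders).
# Credentials may also come from the PLIVO_AUTH_ID / PLIVO_AUTH_TOKEN
# environment variables via fetch_credentials() above.
#
#   with Client('MY_AUTH_ID', 'MY_AUTH_TOKEN') as client:
#       account = client.request('GET', path=[])  # fetch the account resource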
| 38.09607 | 79 | 0.568432 | 6,822 | 0.781981 | 0 | 0 | 0 | 0 | 0 | 0 | 1,262 | 0.144658 |
6f86c0ab72e5425a1aa9524504a9484391892c01 | 4,301 | py | Python | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | ["MIT"] | null | null | null | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | ["MIT"] | null | null | null | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | ["MIT"] | null | null | null |
# Part 3 of UWCSE's Project 3
#
# based on Lab Final from UCSC's Networking Class
# which is based on of_tutorial by James McCauley
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr, IPAddr6, EthAddr
log = core.getLogger()
# statically allocate a routing table for hosts
# MACs are used only in part 4
IPS = {
"h10" : ("10.0.1.10", '00:00:00:00:00:01'),
"h20" : ("10.0.2.20", '00:00:00:00:00:02'),
"h30" : ("10.0.3.30", '00:00:00:00:00:03'),
"serv1" : ("10.0.4.10", '00:00:00:00:00:04'),
"hnotrust" : ("172.16.10.100", '00:00:00:00:00:05'),
}
class Part3Controller (object):
"""
A Connection object for that switch is passed to the __init__ function.
"""
def __init__ (self, connection):
print (connection.dpid)
# Keep track of the connection to the switch so that we can
# send it messages!
self.connection = connection
# This binds our PacketIn event listener
connection.addListeners(self)
#use the dpid to figure out what switch is being created
if (connection.dpid == 1):
self.s1_setup()
elif (connection.dpid == 2):
self.s2_setup()
elif (connection.dpid == 3):
self.s3_setup()
elif (connection.dpid == 21):
self.cores21_setup()
elif (connection.dpid == 31):
self.dcs31_setup()
else:
print ("UNKNOWN SWITCH")
exit(1)
def s1_setup(self):
#put switch 1 rules here
fm=of.ofp_flow_mod()
fm.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
self.connection.send(fm)
def s2_setup(self):
#put switch 2 rules here
fm=of.ofp_flow_mod()
fm.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
self.connection.send(fm)
def s3_setup(self):
#put switch 3 rules here
fm=of.ofp_flow_mod()
fm.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
self.connection.send(fm)
def cores21_setup(self):
#put core switch rules here
fm=of.ofp_flow_mod()
fm.match.nw_src = IPAddr(IPS["hnotrust"][0])
fm.match.dl_type = 0x0800
fm.match.nw_proto = 1 #ICMP
self.connection.send(fm)
fm=of.ofp_flow_mod()
fm.match.nw_src = IPAddr(IPS["hnotrust"][0])
fm.match.dl_type = 0x0800
fm.match.nw_dst = IPAddr(IPS["serv1"][0])
self.connection.send(fm)
fm=of.ofp_flow_mod()
# fm.match.dl_type = 0x0806
fm.match.nw_dst = IPAddr(IPS["h10"][0])
fm.actions.append(of.ofp_action_output(port = 1))
self.connection.send(fm)
fm=of.ofp_flow_mod()
# fm.match.dl_type = 0x0806
fm.match.nw_dst = IPAddr(IPS["h20"][0])
fm.actions.append(of.ofp_action_output(port = 2))
self.connection.send(fm)
fm=of.ofp_flow_mod()
# fm.match.dl_type = 0x0806
fm.match.nw_dst = IPAddr(IPS["h30"][0])
fm.actions.append(of.ofp_action_output(port = 3))
self.connection.send(fm)
fm=of.ofp_flow_mod()
# fm.match.dl_type = 0x0806
fm.match.nw_dst = IPAddr(IPS["serv1"][0])
fm.actions.append(of.ofp_action_output(port = 4))
self.connection.send(fm)
def dcs31_setup(self):
#put datacenter switch rules here
fm=of.ofp_flow_mod()
fm.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
self.connection.send(fm)
#used in part 4 to handle individual ARP packets
#not needed for part 3 (USE RULES!)
#causes the switch to output packet_in on out_port
def resend_packet(self, packet_in, out_port):
msg = of.ofp_packet_out()
msg.data = packet_in
action = of.ofp_action_output(port = out_port)
msg.actions.append(action)
self.connection.send(msg)
def _handle_PacketIn (self, event):
"""
Packets not handled by the router rules will be
forwarded to this method to be handled by the controller
"""
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
print ("Unhandled packet from " + str(self.connection.dpid) + ":" + packet.dump())
def launch ():
"""
Starts the component
"""
def start_switch (event):
log.debug("Controlling %s" % (event.connection,))
Part3Controller(event.connection)
core.openflow.addListenerByName("ConnectionUp", start_switch)
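# Launch sketch (assumes a standard POX checkout; the paths are illustrative):
# place this module under pox/ext/ and start it with
#   ./pox.py part3controller
# then bring up the matching Mininet topology so switches with dpids
# 1, 2, 3, 21 and 31 connect and receive the rules installed in *_setup().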
| 29.662069 | 86 | 0.670077 | 3,457 | 0.803767 | 0 | 0 | 0 | 0 | 0 | 0 | 1,397 | 0.324808 |
6f86e19366559629464f6c94fe703e3f9d6154c1 | 956 | py | Python | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | ["MIT"] | null | null | null | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | ["MIT"] | null | null | null | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | ["MIT"] | null | null | null |
#students exams data entries for terminal report card
print("Westside Educational Complex--End Of second Terminal Report--Class-KKJA--Name:Theodora Obaa Yaa Gyarbeng")
while True:
student_score = float(input ("Enter the student score:"))
if student_score >= 1.0 and student_score <= 39.9:
print("student_score is F9", "fail")
elif student_score >= 40 and student_score <= 49.9:
print("student_score is E8", "pass" )
elif student_score >= 50 and student_score <= 59.9:
print("student_score is D7", "credit")
elif student_score >= 60 and student_score <= 69.9:
print("student_score is C4", "good")
elif student_score >= 70 and student_score <= 79.9:
print("student_score is B2", "very_good")
elif student_score >= 80 and student_score <= 100:
print("student_score is A1", "excellent")
else:
print("student_score is invalid entry")
student = []
| 39.833333 | 113 | 0.654812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.408996 |
6f8acfd21a05c8bfceaaf10a028a424c71fb2404 | 201 | py | Python | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | ["MIT"] | null | null | null | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | ["MIT"] | null | null | null | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | ["MIT"] | null | null | null |
n = int(input())
l = []
c = 0
for i in range(0,n):
p = input()
print('c -> ', c)
if p in l:
c += 1
l.append(p)
print("Falta(m) {} pomekon(s).".format(151 - (n-c))) | 12.5625 | 52 | 0.41791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.159204 |
6f8bf885df7a7d6ca45c1dc88e7c1c7b2dc8f976 | 551 | py | Python | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | ["MIT"] | null | null | null | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | ["MIT"] | null | null | null | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | ["MIT"] | null | null | null |
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def setting(name):
return getattr(settings, name, "")
#@register.filter
#def format_difference(value):
# number = int(value)
# if number > 0:
# return mark_safe('<span style="color: green">+' + str(number) + '</span>')
# elif number < 0:
# return mark_safe('<span style="color: red">' + str(number) + '</span>')
# else:
# return mark_safe(str(number))
| 26.238095 | 83 | 0.662432 | 0 | 0 | 0 | 0 | 78 | 0.141561 | 0 | 0 | 324 | 0.588022 |
6f8c3e1625cd1f82b18d5c123f61e8f119682b0c | 507 | py | Python | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | ["MIT"] | null | null | null | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | ["MIT"] | null | null | null | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | ["MIT"] | null | null | null |
# Given a series of input numbers, count the number of times
# the values increase from one to the next.
import pandas as pd
# Part 1
sample = pd.read_csv(r".\Day1\sample.txt", header=None, squeeze=True)  # raw strings avoid invalid-escape warnings
input = pd.read_csv(r".\Day1\input.txt", header=None, squeeze=True)  # note: shadows the builtin input()
#print(type(input))
ans = input.diff(1).apply(lambda x: x > 0).sum()
#print(ans)
# Part 2
#print(sample)
rolling = input.rolling(window=3,min_periods=3,center=True)
print(rolling.sum().dropna().diff(1).apply(lambda x: x > 0).sum())
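# Dependency-free sketch of the same two answers (assumes one integer per line
# in the same input file; kept commented out so the pandas version above runs
# unchanged):
# with open(r".\Day1\input.txt") as f:
#     depths = [int(line) for line in f if line.strip()]
# part1 = sum(b > a for a, b in zip(depths, depths[1:]))
# part2 = sum(b > a for a, b in zip(depths, depths[3:]))  # 3-wide window sums differ by depths[i+3] - depths[i]
# print(part1, part2)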
| 28.166667 | 69 | 0.706114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.394477 |
6f8ebcc85be160184b266c276cc0f3687216d3eb | 206 | py | Python | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | ["MIT"] | 1 | 2016-09-24T17:38:06.000Z | 2016-09-24T17:38:06.000Z | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | ["MIT"] | null | null | null | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | ["MIT"] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
kivy.require('1.9.0')
class GUITestApp(App):
def build(self):
return BoxLayout()
glApp = GUITestApp()
glApp.run()
| 17.166667 | 40 | 0.713592 | 70 | 0.339806 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.033981 |
6f8edf6b803563f114318f388210647b9924420a | 11,263 | py | Python | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | ["MIT"] | 1 | 2021-08-11T19:43:38.000Z | 2021-08-11T19:43:38.000Z | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | ["MIT"] | null | null | null | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | ["MIT"] | 1 | 2021-04-09T08:10:27.000Z | 2021-04-09T08:10:27.000Z |
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Vincenzo Lomonaco, Lorenzo Pellegrini #
# E-mail: [email protected] #
# Website: www.continualai.org #
################################################################################
import GPUtil
from threading import Thread
import time
import warnings
from typing import Optional, TYPE_CHECKING, List
from avalanche.evaluation import Metric, PluginMetric
from avalanche.evaluation.metric_results import MetricValue, MetricResult
from avalanche.evaluation.metric_utils import get_metric_name, \
phase_and_task, stream_type
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class MaxGPU(Metric[float]):
"""
The standalone GPU usage metric.
    Important: this metric approximates the real maximum GPU usage
    percentage, since it samples the GPU load at discrete time intervals.
    Instances of this metric keep the maximum GPU usage percentage detected.
    The `start_thread` method starts the usage tracking.
    The `stop_thread` method stops the tracking.
    The result, obtained using the `result` method, is the maximum GPU
    usage percentage detected.
The reset method will bring the metric to its initial state. By default
this metric in its initial state will return an usage value of 0.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
self.every = every
self.gpu_id = gpu_id
n_gpus = len(GPUtil.getGPUs())
if n_gpus == 0:
warnings.warn("Your system has no GPU!")
self.gpu_id = None
elif gpu_id < 0:
warnings.warn("GPU metric called with negative GPU id."
"GPU logging disabled")
self.gpu_id = None
else:
if gpu_id >= n_gpus:
warnings.warn(f"GPU {gpu_id} not found. Using GPU 0.")
self.gpu_id = 0
self.thread = None
"""
Thread executing GPU monitoring code
"""
self.stop_f = False
"""
Flag to stop the thread
"""
self.max_usage = 0
"""
Main metric result. Max GPU usage.
"""
def _f(self):
"""
Until a stop signal is encountered,
this function monitors each `every` seconds
the maximum amount of GPU used by the process
"""
start_time = time.monotonic()
while not self.stop_f:
# GPU percentage
gpu_perc = GPUtil.getGPUs()[self.gpu_id].load * 100
if gpu_perc > self.max_usage:
self.max_usage = gpu_perc
time.sleep(self.every - ((time.monotonic() - start_time)
% self.every))
def start_thread(self):
        if self.gpu_id is not None:  # explicit check: GPU id 0 is valid, but falsy
assert not self.thread, "Trying to start thread " \
"without joining the previous."
self.thread = Thread(target=self._f, daemon=True)
self.thread.start()
def stop_thread(self):
if self.thread:
self.stop_f = True
self.thread.join()
self.stop_f = False
self.thread = None
def reset(self) -> None:
"""
Resets the metric.
:return: None.
"""
self.max_usage = 0
def result(self) -> Optional[float]:
"""
Returns the max GPU percentage value.
        :return: The percentage GPU usage as a float value in range [0, 100].
"""
return self.max_usage
class MinibatchMaxGPU(PluginMetric[float]):
"""
The Minibatch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the Minibatch Max GPU metric
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_training(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_training_iteration(self, strategy: 'BaseStrategy') -> None:
self.reset()
def after_training_iteration(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_training(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_MB"
class EpochMaxGPU(PluginMetric[float]):
"""
The Epoch Max GPU metric.
This plugin metric only works at training time.
"""
def __init__(self, gpu_id, every=0.5):
"""
Creates an instance of the epoch Max GPU metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_training(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_training_epoch(self, strategy) -> MetricResult:
self.reset()
def after_training_epoch(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_training(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Epoch"
class ExperienceMaxGPU(PluginMetric[float]):
"""
The Experience Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Experience Max GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_eval(self, strategy: 'BaseStrategy') \
-> None:
self._gpu.start_thread()
def before_eval_exp(self, strategy) -> MetricResult:
self.reset()
def after_eval_exp(self, strategy: 'BaseStrategy') \
-> MetricResult:
return self._package_result(strategy)
def after_eval(self, strategy: 'BaseStrategy') -> None:
self._gpu.stop_thread()
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Experience"
class StreamMaxGPU(PluginMetric[float]):
"""
The Stream Max GPU metric.
This plugin metric only works at eval time.
"""
def __init__(self, gpu_id, every=0.5):
"""
        Creates an instance of the Stream Max GPU usage metric.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
"""
super().__init__()
self.gpu_id = gpu_id
self._gpu = MaxGPU(gpu_id, every)
def before_eval(self, strategy) -> MetricResult:
self.reset()
self._gpu.start_thread()
def after_eval(self, strategy: 'BaseStrategy') \
-> MetricResult:
packed = self._package_result(strategy)
self._gpu.stop_thread()
return packed
def reset(self) -> None:
self._gpu.reset()
def result(self) -> float:
return self._gpu.result()
def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()
phase_name, _ = phase_and_task(strategy)
stream = stream_type(strategy.experience)
metric_name = '{}/{}_phase/{}_stream' \
.format(str(self),
phase_name,
stream)
plot_x_position = self.get_global_counter()
return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]
def __str__(self):
return f"MaxGPU{self.gpu_id}Usage_Stream"
def gpu_usage_metrics(gpu_id, every=0.5, minibatch=False, epoch=False,
experience=False, stream=False) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
:param minibatch: If True, will return a metric able to log the minibatch
max GPU usage.
:param epoch: If True, will return a metric able to log the epoch
max GPU usage.
:param experience: If True, will return a metric able to log the experience
max GPU usage.
:param stream: If True, will return a metric able to log the evaluation
max stream GPU usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxGPU(gpu_id, every))
if epoch:
metrics.append(EpochMaxGPU(gpu_id, every))
if experience:
metrics.append(ExperienceMaxGPU(gpu_id, every))
if stream:
metrics.append(StreamMaxGPU(gpu_id, every))
return metrics
__all__ = [
'MaxGPU',
'MinibatchMaxGPU',
'EpochMaxGPU',
'ExperienceMaxGPU',
'StreamMaxGPU',
'gpu_usage_metrics'
]
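# Standalone usage sketch (illustrative; assumes GPUtil can see at least one
# GPU). The Plugin* classes above are driven by Avalanche's training/eval
# loops, but MaxGPU can also be used directly:
#
#   gpu = MaxGPU(gpu_id=0, every=0.5)
#   gpu.start_thread()
#   ...  # run the workload to profile
#   gpu.stop_thread()
#   print(gpu.result())  # maximum GPU load percentage observed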
| 29.717678 | 80 | 0.589097 | 8,760 | 0.777768 | 0 | 0 | 0 | 0 | 0 | 0 | 4,598 | 0.408239 |
6f8f1cd00d467ef37a750fa1bf46d98ba2fd1d86 | 988 | py | Python | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | ["MIT"] | null | null | null | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | ["MIT"] | null | null | null | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | ["MIT"] | null | null | null |
#
# @lc app=leetcode id=971 lang=python3
#
# [971] Flip Binary Tree To Match Preorder Traversal
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
def flipMatchVoyage(self, root: TreeNode, voyage: list[int]) -> list[int]:
nodes, i, flipped = deque([root]), 0, []
while nodes:
node = nodes.pop()
if not node:
continue
if node.val != voyage[i]:
return [-1]
i += 1
if node.left and node.left.val != voyage[i]:
flipped.append(node.val)
nodes.append(node.left)
nodes.append(node.right)
else:
nodes.append(node.right)
nodes.append(node.left)
return flipped
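# Quick driver sketch (hypothetical tree; TreeNode mirrors the commented-out
# definition LeetCode normally injects):
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#   root = TreeNode(1, TreeNode(2), TreeNode(3))
#   Solution().flipMatchVoyage(root, [1, 3, 2])  # -> [1] (node 1's children flipped)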
| 25.333333 | 78 | 0.526316 | 645 | 0.652834 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.298583 |
6f8f8270104be3d27614c913a1c738aa92c207d4 | 5,799 | py | Python | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | ["MIT"] | 1 | 2021-09-14T23:07:24.000Z | 2021-09-14T23:07:24.000Z | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | ["MIT"] | null | null | null | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | ["MIT"] | null | null | null |
import base64
import datetime
import io
import json
import os
import requests
from collections import namedtuple
from urllib.parse import urlparse
import faust
import numpy as np
import keras_preprocessing.image as keras_img
from avro import schema
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from biovolume import calc_biovolume
from blob import Blob, BlobConfig
config_path = os.environ.get('IFCB_STREAM_APP_CONFIG', 'config.json')
with open(config_path) as config_file:
config = json.load(config_file)
Stats = namedtuple(
'Stats',
['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob', 'classification_time', 'biovolume', 'carbon', 'hab']
)
ClassifierStats = namedtuple(
'ClassifierStats',
['sample_name', 'prob', 'classifier', 'classification_time']
)
schema_config = {
'url': config['schema.registry.url'],
'ssl.ca.location': None
}
# need to use CachedSchemaRegistryClient to get schema
# - need to copy config because it is consumed when used in CachedSchemaRegistryClient
schema_config_copy = schema_config.copy()
cached_schema_client = CachedSchemaRegistryClient(schema_config)
key_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-key')[1])
value_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-value')[1])
key_schema = avro.loads(key_schema)
value_schema = avro.loads(value_schema)
producer = AvroProducer({
'bootstrap.servers': config['bootstrap.servers'],
'schema.registry.url': config['schema.registry.url']
},
default_key_schema=key_schema,
default_value_schema=value_schema
)
app = faust.App(
config['app_name'],
broker=config['broker'],
topic_partitions=config['topic_partitions'],
store='rocksdb://',
consumer_auto_offset_reset='earliest',
version=1
)
image_topic = app.topic(config['image_topic'])
stats_topic = app.topic(config['stats_topic'])
classifier_stats_table = app.Table('ifcb-classifier-stats', default=ClassifierStats)
diatoms = config['diatoms']
class_names = config['class_names']
hab_species = config['hab_species']
def publish_stats(feature_key, image, classifier_stats, blob_config=BlobConfig()):
"""Calculate biovolume, carbon, hab, and publish to Kafka"""
# calculate biovolume
# - scale biovolume for 3d (from ifcb-analysis)
blob = Blob(image, blob_config)
biovolume = calc_biovolume(blob)
mu = 1/3.4
biovolume = biovolume * mu ** 3
carbon = calc_carbon(classifier_stats[0], biovolume)
hab = classifier_stats[0] in hab_species
time, ifcb_id, roi = feature_key.split('_')
roi = int(roi)
timestamp = int(datetime.datetime.strptime(time[1:], '%Y%m%dT%H%M%S').timestamp())
stats = Stats(
timestamp,
ifcb_id,
roi,
classifier_stats[0],
classifier_stats[2],
classifier_stats[1],
classifier_stats[3],
biovolume,
carbon,
hab
)
# send to topic with Avro schema
producer.poll(0)
producer.produce(
topic=config['stats_topic'],
key={
'pid': f"{time}_{ifcb_id}",
'roi': int(roi)
},
value=stats._asdict()
)
producer.flush()
@app.agent(image_topic)
async def classify(images, url=config['tensorflow_url'], target_size=(224, 224)):
async for image in images:
# decode binary blob to png file then resize and normalize
image_str = base64.b64decode(image['image'])
image_file = io.BytesIO(image_str)
img = keras_img.img_to_array(
keras_img.load_img(image_file, target_size=target_size)
)
img /= 255
# create payload and send to TF RESTful API
headers = {"content-type": "application/json"}
data = json.dumps({'instances': [img.tolist()]})
result = requests.post(url, headers=headers, data=data)
# save the probabilities for each class (1d ndarray)
probs = result.json()['predictions'][0][:]
        # feature_key identifies one ROI: <time>_<ifcb_id>_<roi>
time = datetime.datetime.fromtimestamp(image['datetime'])
feature_key = f"{time:D%Y%m%dT%H%M%S}_{image['ifcb_id']}_{image['roi']:05}"
print(f'processing {feature_key}')
# update table if current prob is greater than what is already in the table
prob = np.nanmax(probs)
if feature_key not in classifier_stats_table or prob > classifier_stats_table[feature_key].prob:
name = class_names[np.argmax(probs)]
classifier, version = get_classifier(url)
classifier_version = f'{classifier}:{version}'
classifier_stats_table[feature_key] = ClassifierStats(
name,
prob,
classifier_version,
int(datetime.datetime.utcnow().timestamp())
)
# send
publish_stats(feature_key, image_str, classifier_stats_table[feature_key])
def get_classifier(url):
"""Given TF style url, return name and version"""
parse_results = urlparse(url)
_, version, _, name_raw = parse_results.path.split('/')
name = name_raw.split(':')[0]
return (name, version)
def calc_carbon(english_name, scaled_biovolume, diatom_list=diatoms):
"""Given volume in u3/cell return carbon in pg C/cell.
$log_10(C) = log(a) + b \cdot log_10(V)$
"""
if english_name in diatom_list:
carbon = 10**(-0.665 + 0.939*np.log10(scaled_biovolume))
else:
carbon = 10**(-0.993 + 0.881*np.log10(scaled_biovolume))
return carbon
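# Worked example for calc_carbon (hypothetical species name/values): for a
# 1000 um^3 cell, a diatom gives 10**(-0.665 + 0.939 * 3) ~= 142 pg C/cell,
# while a non-diatom gives 10**(-0.993 + 0.881 * 3) ~= 45 pg C/cell, e.g.:
#   calc_carbon('some_diatom', 1000.0, diatom_list=['some_diatom'])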
if __name__ == '__main__':
app.main()
| 31.862637 | 113 | 0.682876 | 0 | 0 | 0 | 0 | 1,704 | 0.293844 | 1,680 | 0.289705 | 1,490 | 0.256941 |
6f8fb26b38bd837cc1990577bdd9ed4c340e3f51 | 1,296 | py | Python | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | ["MIT"] | null | null | null |
import discord
from discord.ext import commands
from discord.utils import get
class c260(commands.Cog, name="c260"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Yikilth_Lair_of_the_Abyssals', aliases=['c260', 'Abyssal_11'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Yikilth, Lair of the Abyssals',
color=0x1D9E74)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360326.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Abyssal)', inline=True)
embed.add_field(name='Type', value='Spell/Field', inline=False)
embed.add_field(name='Card Effect', value='When this card is activated: Add 1 "Abyssal" monster from your Deck to your hand. Once per turn, when your opponent activates a card or effect that targets and/or would destroy a Set monster(s) you control: You can flip 1 Set monster you control into face-up Attack or Defense Position; negate the activation. You can only activate 1 "Yikilth, Lair of the Abyssals" per turn.', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    bot.add_cog(c260(bot))
| 56.347826 | 442 | 0.706019 | 1,159 | 0.89429 | 0 | 0 | 1,049 | 0.809414 | 958 | 0.739198 | 631 | 0.486883 |
6f904b06f4d8b0199476081eeaa6c6a6c588158e | 98 | py | Python | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | ["ISC"] | null | null | null | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | ["ISC"] | null | null | null | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | ["ISC"] | null | null | null |
from .cli import entrypoint
if __name__ == "__main__": # pragma: no cover
entrypoint.main()
| 19.6 | 46 | 0.693878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.285714 |
6f9177f95c9276da027118820c1944dc489b0063 | 137 | py | Python | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | ["MIT"] | null | null | null | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | ["MIT"] | 27 | 2019-09-25T14:19:44.000Z | 2022-02-12T21:39:17.000Z | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | ["MIT"] | null | null | null |
from flask import jsonify
from ..app import app
@app.route('/ping', methods=('GET',))
def get_ping():
return jsonify(ping='pong')
| 15.222222 | 37 | 0.671533 | 0 | 0 | 0 | 0 | 85 | 0.620438 | 0 | 0 | 18 | 0.131387 |
6f91ab094e27b4b2253d62e62b30b9f0a9a83bbc | 1,726 | py | Python | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | ["MIT"] | 1 | 2021-06-10T02:51:53.000Z | 2021-06-10T02:51:53.000Z | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | ["MIT"] | null | null | null | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | ["MIT"] | null | null | null |
inst_25 = [(35,0,15),(29,0,20),(9,0,11),(9,13,35),(3,0,19),(37,0,8),(11,0,30),(19,0,25),(13,0,25),(39,0,18)]
inst_bait = [(10,0,10), (14,0,11), (33,0,26),(4,2,18),(4,20,30),(39,117,137),(12,5,21),(28,0,14),(32,5,14),(32,15,44),(36,0,9),(40,0,14),(2,1,15),(2,17,35),(5,160,168),(11,158,164),(13,116,131)]
inst_30 = []
inst_25late = [(32,160,190),(38,61,76),(39,446,466),(40,153,153+37),(39,269,329),(40,262,287),(38,7,42)]
inst_25late_extended = [(39,269,369),(40,153,190),(38,7,50),(38,61,105),(32,160,199),(39,446,486),(35,70,119),(38,106,130),(36,204,233),(30,57,94),(29,221,241),(40,262,312),(29,160,184),(30,0,24)]
inst_25_100P = [(38,131,131+80)]
# treatments = {'25*' : inst_25late,'25' : inst_25,'baits' : inst_bait, '30' : inst_30}
treatments = {'25_100' : inst_25_100P, '25*' : inst_25late,'25' : inst_25,'baits' : inst_bait, '30' : inst_30}
plate_number = {(9,0,11) : 296, (9,13,35) : 296, (3,0,19) : 340, (37,0,8) : 269,(11,0,30) : 314, (19,0,25) : 344, (13,0,25) : 298, (39,0,18) : 297, (35,0,15) : 351,(10,0,10) : 395,(14,0,11) : 399, (33,0,26) : 420, (4,2,18) : 423, (4,20,30) : 423,(8,0,17): 434 ,(8,20,30) : 434,(39,117,137) : 433, (12,5,21) : 436, (28,0,14): 405,(32,5,45):409,(36,0,9) : 419,(40,0,14) : 425,(2,1,15):435,(2,17,35):435,(5,160,168):382,(11,158,164) : 416,(13,116,131) : 424, (29,0,20) : 373,(32,15,44):409, (32,5,14) : 409, (40,153,153+37) : 69,(39,269,329) : 94, (40,262,287) : 102,(38,7,42) : 59, (32,160,190) : 152,(38,61,76) : 137,(39,446,466) : 26, (38,131,131+80):721}
comments = {395 : 'ignore', 399 : 'left', 405 : 'left', 409 : 'right', 416 : 'middle', 419 : 'middle', 420 : 'left', 423: 'right', 424 : 'left', 425 : 'middle', 433 : 'right', 435 : 'middle', 436 : 'left'}
| 143.833333 | 656 | 0.551564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.11993 |
6f92f4f38b2cf0fe2438ae66792a5537b2e53d3f | 30 | py | Python | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | ["MIT"] | null | null | null | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | ["MIT"] | null | null | null | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | ["MIT"] | null | null | null |
__version__ = '5.2.158 f.LUM'
| 15 | 29 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.5 |
6f93e22cf26c9a478c3691514ddab933b92e050e | 280 | py | Python | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | ["MIT"] | null | null | null | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | ["MIT"] | null | null | null | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | ["MIT"] | null | null | null |
import numpy as np
import pickle
expert_file = 'maze_expert.npy'
imitation_agent_file = 'maze_agent.npy'
with open(imitation_agent_file, 'rb') as handle:
agent_data = pickle.load(handle)
with open(expert_file, 'rb') as handle:
expert_data = pickle.load(handle)
print("OK") | 31.111111 | 48 | 0.757143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.160714 |
6f949785191c10e7a989c8350cfc26a75e30b461 | 3,967 | py | Python | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | ["MIT"] | 1 | 2021-11-23T02:44:14.000Z | 2021-11-23T02:44:14.000Z | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | ["MIT"] | null | null | null | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | ["MIT"] | null | null | null |
import unittest
from celestial import Celestial
from strings import CelestialStrings
from datetime import datetime
import pytest
import math
class TestCelestial(unittest.TestCase):
"""Testing the CelestialStrings class for generating celestial answers for TTS to read aloud"""
def setUp(self):
...
def test_get_local_time_str(self):
input = datetime(2019, 12, 1, 13, 24)
expected = "08:24AM"
self.assertEqual(CelestialStrings._get_local_time_str(input), expected)
def test_get_day_str_today(self):
start_dt = datetime(2019, 12, 1, 0, 0)
event_dt = datetime(2019, 12, 1, 3, 4)
expected = "today"
self.assertEqual(CelestialStrings._get_day_str(start_dt, event_dt), expected)
def test_get_day_str_tomorrow(self):
start_dt = datetime(2019, 12, 1, 0, 0)
event_dt = datetime(2019, 12, 2, 3, 4)
expected = "tomorrow"
self.assertEqual(CelestialStrings._get_day_str(start_dt, event_dt), expected)
def test_get_day_str_next_week(self):
start_dt = datetime(2019, 12, 1, 0, 0)
event_dt = datetime(2019, 12, 8, 3, 4)
expected = "Saturday, December 07"
self.assertEqual(CelestialStrings._get_day_str(start_dt, event_dt), expected)
def test_get_cardinal_str(self):
self.assertEqual(
CelestialStrings._get_cardinal_str(0), "north", "0 degrees is North"
)
self.assertEqual(
CelestialStrings._get_cardinal_str(50), "northeast", "50 degrees is NE",
)
self.assertEqual(
CelestialStrings._get_cardinal_str(88), "east", "88 degrees is East"
)
self.assertEqual(
CelestialStrings._get_cardinal_str(180), "south", "180 degrees is South"
)
self.assertEqual(
CelestialStrings._get_cardinal_str(350), "north", "350 degrees is North"
)
def test_get_cardinal_str_from_abbr(self):
self.assertEqual(CelestialStrings._get_cardinal_str_from_abbr("N"), "north")
self.assertEqual(
CelestialStrings._get_cardinal_str_from_abbr("SSE"), "south southeast"
)
def test_get_event_message(self):
body = "moon"
event = "rise"
dt = datetime(2019, 12, 1, 13, 24)
event_info = (dt, 120)
self.assertEqual(
CelestialStrings.get_event_message(body, event, event_info),
"The next moonrise is at 08:24AM Sunday, December 01, in the southeast",
)
def test_get_event_message_planet(self):
body = "venus"
event = "set"
dt = datetime(2019, 12, 1, 13, 24)
event_info = (dt, 120)
self.assertEqual(
CelestialStrings.get_event_message(body, event, event_info),
"The next venus set is at 08:24AM Sunday, December 01, in the southeast",
)
def test_get_moon_phase_message(self):
phase_info = ("waning", "crescent", 10)
self.assertEqual(
CelestialStrings.get_moon_phase_message(phase_info),
"The moon is a waning crescent",
)
def test_get_next_moon_event_message(self):
start_dt = datetime(2019, 12, 1, 0, 0)
event_dt = datetime(2019, 12, 8, 3, 4)
expected = "The next full moon is on Saturday, December 07, at 10:04PM"
self.assertEqual(
CelestialStrings.get_next_moon_event_message("full", event_dt), expected
)
def test_get_next_iss_sighting_message(self):
sighting = {
"alt_degrees": 66,
"approach_dir": "NW",
"depart_dir": "SE",
"duration_mins": 6,
"time": datetime(2020, 2, 7, 23, 51),
}
expected = "You can see the space station Friday, February 07 at 06:51PM, moving from the northwest to the southeast"
self.assertEqual(
CelestialStrings.get_next_iss_sighting_message(sighting), expected
)
| 34.198276 | 125 | 0.633224 | 3,822 | 0.963448 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.194605 |
6f960fdd0b967c14a7efcefaab212681557a8931 | 3,290 | py | Python | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | ["MIT"] | 8 | 2020-04-06T08:47:26.000Z | 2021-02-23T17:10:12.000Z | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | ["MIT"] | 2 | 2020-05-14T16:59:33.000Z | 2021-06-19T23:48:35.000Z | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | ["MIT"] | 2 | 2020-05-02T22:49:01.000Z | 2020-05-12T02:39:26.000Z |
import copy
import json
import logging
from . import utils
# -------------------------------------------------------------------------------------------------
def _update_config_by_args(config, args, prefix=""):
for k, v in config.items():
arg_name = prefix + k.replace("-", "_")
if isinstance(v, dict):
_update_config_by_args(v, args, arg_name + "_")
continue
arg_val = args.get(arg_name)
if arg_val is not None:
config[k] = arg_val
# -------------------------------------------------------------------------------------------------
def _add_args_from_config_desc(parser, config_desc, prefix="--"):
for key, val in config_desc.items():
arg_name = prefix + key
if isinstance(val, dict):
_add_args_from_config_desc(parser, val, arg_name + "-")
continue
if isinstance(val, tuple): # tuple contains: value, type, help
parser.add_argument(arg_name, type=val[1], metavar="V",
help="{}, default: {}".format(val[2], val[0]))
else:
t = utils.str2bool if isinstance(val, bool) else type(val)
parser.add_argument(arg_name, type=t, metavar="V",
help="{}, default: {}".format(type(val).__name__, val))
# -------------------------------------------------------------------------------------------------
def _update_config(dst, src, config_desc, path=""):
for key, new_val in src.items():
orig_val = dst.get(key)
field_desc = config_desc.get(key)
if isinstance(new_val, dict):
_update_config(orig_val, new_val, field_desc, "{}/{}".format(path, key))
else:
if (type(field_desc) is tuple) and (type(new_val) is str):
dst[key] = field_desc[1](new_val) # perform conversion
else:
dst[key] = type(field_desc)(new_val)
logging.debug("Set {}={} from config file".format(key, dst[key]))
# -------------------------------------------------------------------------------------------------
def _create_config_from_desc(config_desc):
res = {}
for key, val in config_desc.items():
if isinstance(val, tuple): # tuple contains: value, type, help
res[key] = val[1](val[0])
elif isinstance(val, dict):
res[key] = _create_config_from_desc(val)
else:
res[key] = val
return res
# -------------------------------------------------------------------------------------------------
def add_arguments_from_config_desc(parser, config_desc, read_from_file=False):
parser.add_argument("--config", "-C", type=str, metavar="PATH", help="Config path")
_add_args_from_config_desc(parser, config_desc)
# -------------------------------------------------------------------------------------------------
def get_config_from_args(args, config_desc):
config = _create_config_from_desc(config_desc)
if args.config is not None:
logging.debug("Update default config by user's one '{}'".format(args.config))
with open(args.config, "r") as f:
user_config = json.load(f)
_update_config(config, user_config, config_desc)
_update_config_by_args(config, vars(args))
return config
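# Usage sketch (the config description below is hypothetical, but follows the
# "(default, type, help)" tuple convention handled above):
#
#   import argparse
#   CONFIG_DESC = {
#       "port": ("/dev/ttyUSB0", str, "serial port to use"),
#       "net": {"ip": ("192.168.1.10", str, "device IP address")},
#   }
#   parser = argparse.ArgumentParser()
#   add_arguments_from_config_desc(parser, CONFIG_DESC)
#   config = get_config_from_args(parser.parse_args(), CONFIG_DESC)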
| 38.255814 | 99 | 0.50152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 857 | 0.260486 |
6f9664a20f8e76c49fc5cef19c2bbf5957b352e2 | 746 | py | Python | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | ["MIT"] | null | null | null | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | ["MIT"] | null | null | null | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | ["MIT"] | null | null | null |
from osmo import Osmo
import random
import time
class PositiveCalculator:
@staticmethod
def guard_something():
return True
@staticmethod
def step_something():
print("1. inside step")
# Random wait can be added inside test step
wait_ms = random.randint(200, 1000)
print("{} sleep inside step".format(wait_ms))
time.sleep(wait_ms / 1000)
print("2. inside step")
@staticmethod
def after():
# Random wait can be added also between test steps
wait_ms = random.randint(200, 3000)
print('Waiting for: {}ms between steps'.format(wait_ms))
time.sleep(wait_ms / 1000)
print('')
osmo = Osmo(PositiveCalculator())
osmo.generate()
| 22.606061 | 64 | 0.630027 | 643 | 0.86193 | 0 | 0 | 601 | 0.80563 | 0 | 0 | 182 | 0.243968 |
6f985fc4f5c199385b03c83c5b2b06f32b9bac8b | 3,475 | py | Python | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | ["BSD-3-Clause"] | 20 | 2017-07-03T19:09:09.000Z | 2021-09-10T02:53:56.000Z | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | ["BSD-3-Clause"] | null | null | null | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | ["BSD-3-Clause"] | 9 | 2017-09-17T02:05:06.000Z | 2020-01-31T00:12:01.000Z |
#!/usr/bin/env python
# Author: Omid Mashayekhi <[email protected]>
import sys
import os
import subprocess
import config
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import ec2
temp_file_name = '_temp_file_'
def copy_binary_file_to_hosts(ip_addresses):
for ip in ip_addresses:
command = ''
command += ' scp -i ' + config.PRIVATE_KEY
command += ' -o UserKnownHostsFile=/dev/null '
command += ' -o StrictHostKeyChecking=no '
command += config.SOURCE_PATH + 'Water '
command += ' ubuntu@' + ip + ':' + config.REMOTE_PATH
subprocess.call(command, shell=True)
def collect_logs(ip_addresses):
subprocess.call(['rm', '-rf', config.OUTPUT_PATH])
subprocess.call(['mkdir', '-p', config.OUTPUT_PATH])
for ip in ip_addresses:
subprocess.Popen(['scp', '-q', '-r', '-i', config.PRIVATE_KEY,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'ubuntu@' + ip + ':' + config.FOLDER_PATH + 'mpi*.log',
config.OUTPUT_PATH])
subprocess.Popen(['scp', '-q', '-r', '-i', config.PRIVATE_KEY,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'ubuntu@' + ip + ':' + config.FOLDER_PATH + '*_lb_log.txt',
config.OUTPUT_PATH])
def clean_logs(ip_addresses):
command = ''
command += 'rm -rf ' + config.FOLDER_PATH + 'mpi*.log' + ';'
command += 'rm -rf ' + config.FOLDER_PATH + '*_lb_log.txt' + ';'
for ip in ip_addresses:
subprocess.Popen(['ssh', '-q', '-i', config.PRIVATE_KEY,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'ubuntu@' + ip, command])
def make_nodes_file_content(ip_addresses):
string = ""
for ip in ip_addresses:
print ip
string = string + ip + " cpu=8\n"
file = open(temp_file_name, 'w+')
file.write(string)
file.close()
def copy_nodes_file_to_hosts(ip_addresses):
make_nodes_file_content(ip_addresses)
for ip in ip_addresses:
command = ''
command += ' scp -i ' + config.PRIVATE_KEY
command += ' -o UserKnownHostsFile=/dev/null '
command += ' -o StrictHostKeyChecking=no '
command += temp_file_name
command += ' ubuntu@' + ip + ':' + config.REMOTE_PATH + config.NODES_FILE_NAME
subprocess.call(command, shell=True)
subprocess.call(['rm', temp_file_name])
def run_experiment(ip):
command = ''
command += ' ssh -i ' + config.PRIVATE_KEY
command += ' -o UserKnownHostsFile=/dev/null '
command += ' -o StrictHostKeyChecking=no '
command += ' ubuntu@' + ip
command += ' \"cd ' + config.REMOTE_PATH + '; '
command += ' mpirun -hostfile ' + config.NODES_FILE_NAME
command += ' -np ' + str(config.INSTANCE_NUM)
command += ' ./Water -scale ' + str(config.SCALE)
command += ' -e ' + str(config.FRAME_NUM) + '\" '
print command
subprocess.call(command, shell=True)
def collect_output_data(ip_addresses):
subprocess.call(['rm', '-rf', config.OUTPUT_NAME])
subprocess.call(['mkdir', '-p', config.OUTPUT_NAME])
process_num = 0
for ip in ip_addresses:
process_num += 1
command = ''
command += ' scp -r -i ' + config.PRIVATE_KEY
command += ' -o UserKnownHostsFile=/dev/null '
command += ' -o StrictHostKeyChecking=no '
command += ' ubuntu@' + ip + ':' + config.REMOTE_PATH + config.OUTPUT_NAME + str(process_num)
command += ' ' + config.OUTPUT_NAME
subprocess.call(command, shell=True)
| 26.937984 | 97 | 0.636835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.268489 |
6f9bfb5d1846d96697c801d792f6539b53696861 | 75 | py | Python | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | ["MIT"] | null | null | null | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | ["MIT"] | null | null | null | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | ["MIT"] | null | null | null |
frase = input().split()
for palavra in frase:
    print(palavra[2], end='')
| 25 | 29 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.026667 |
6f9d6fb07fd37fbb906d2b22ed6f41821f271822 | 198 | py | Python | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | ["MIT"] | null | null | null | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | ["MIT"] | null | null | null | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | ["MIT"] | null | null | null |
#To run the code, write
#from ishashad import ishashad
#then ishashad(number)
def ishashad(n):
if n % sum(map(int,str(n))) == 0:
print("True")
else:
print("False")
    return
| 18 | 37 | 0.60101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.444444 |
6f9e12faf187ffa4348ad42a554949db236c4d07 | 2,393 | py | Python | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | ["Apache-2.0"] | null | null | null | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | ["Apache-2.0"] | null | null | null | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | ["Apache-2.0"] | null | null | null |
"""
This module tests utils
"""
from unittest.mock import patch, MagicMock
from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider
from superset_patchup.oauth import CustomSecurityManager
class TestUtils:
"""
Class to test the utils module
"""
@patch("superset_patchup.utils.request")
def test_is_safe_url(self, mock):
"""
Test that only urls from the same domain are set as safe
by the is_safe_url function
"""
mock.host_url = "https://example.com"
assert is_safe_url("https://example.com") is True
assert is_safe_url("https://google.com") is False
@patch("superset_patchup.utils.os.getenv")
def test_get_complex_env_var_default(self, mock):
"""
Test that the get_complex_env_var function returns the default value
when the variable is not set
"""
mock.return_value = None
default_params = {"bean": "bag"}
params = get_complex_env_var("PARAMS", default_params)
# assert that the value returned is a dictionary
assert isinstance(params, dict)
# assert that the value returned is the default
assert params == default_params
@patch("superset_patchup.utils.os.getenv")
def test_get_complex_env_var(self, mock):
"""
Test that the get_complex_env_var function is able to return a
complex variable
"""
default_params = {"bean": "bag"}
# dict variable
params_value = {"spring": "bean"}
mock.return_value = str(params_value)
params = get_complex_env_var("PARAMS", default_params)
assert isinstance(params, dict)
assert params == params_value
# bool variable
mock.return_value = "True"
bool_params = get_complex_env_var("PARAMS", default_params)
assert isinstance(bool_params, bool)
assert bool_params is True
def test_case_insensitivity_for_provider(self):
"""
        Test that provider information from the user can be case insensitive
        relative to the static standard strings it will be checked against
"""
assert is_valid_provider("opensrp", "OpenSRP")
assert is_valid_provider("OnaData", 'onadata')
assert is_valid_provider("OpenlMis", "openlmis")
assert not is_valid_provider("oensrp", "OpenSrp")
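# Direct usage sketch for get_complex_env_var (no mocking; values illustrative):
#   import os
#   os.environ["PARAMS"] = "{'spring': 'bean'}"
#   get_complex_env_var("PARAMS", {"bean": "bag"})  # -> {'spring': 'bean'}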
| 34.681159 | 86 | 0.662766 | 2,170 | 0.906812 | 0 | 0 | 1,640 | 0.685332 | 0 | 0 | 1,017 | 0.42499 |
6f9e8adaed53c25080171ad9a2dca161824d3a7c | 14,215 | py | Python | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
]
| null | null | null | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
]
| null | null | null | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
]
| null | null | null | import random
import torch
import numpy as np
from torch.autograd import gradcheck, gradgradcheck
import xitorch as xt
from xitorch.integrate import quad, solve_ivp, mcquad, SQuad
from xitorch._tests.utils import device_dtype_float_test
################################## quadrature ##################################
class IntegrationNNModule(torch.nn.Module):
# cos(a*x + b * c)
def __init__(self, a, b):
super(IntegrationNNModule, self).__init__()
self.a = a
self.b = b
def forward(self, x, c):
return torch.cos(self.a * x + self.b * c)
class IntegrationModule(xt.EditableModule):
# cos(a*x + b * c)
def __init__(self, a, b):
self.a = a
self.b = b
def forward(self, x, c):
return torch.cos(self.a * x + self.b * c)
def getparamnames(self, methodname, prefix=""):
return [prefix + "a", prefix + "b"]
class IntegrationNNMultiModule(torch.nn.Module):
# cos(a*x + b * c), sin(a*x + b*c)
def __init__(self, a, b):
super(IntegrationNNMultiModule, self).__init__()
self.a = a
self.b = b
def forward(self, x, c):
return torch.cos(self.a * x + self.b * c), torch.sin(self.a * x + self.b * c)
class IntegrationMultiModule(xt.EditableModule):
# cos(a*x + b * c), sin(a*x + b*c)
def __init__(self, a, b):
self.a = a
self.b = b
def forward(self, x, c):
return torch.cos(self.a * x + self.b * c), torch.sin(self.a * x + self.b * c)
def getparamnames(self, methodname, prefix=""):
return [prefix + "a", prefix + "b"]
class IntegrationInfModule(torch.nn.Module):
def __init__(self, w):
super(IntegrationInfModule, self).__init__()
self.w = w
def forward(self, x):
return torch.exp(-x * x / (2 * self.w * self.w))
@device_dtype_float_test(only64=True, additional_kwargs={
"clss": [IntegrationModule, IntegrationNNModule],
})
def test_quad(dtype, device, clss):
torch.manual_seed(100)
random.seed(100)
nr = 2
fwd_options = {
"method": "leggauss",
"n": 100,
}
a = torch.nn.Parameter(torch.rand((nr,), dtype=dtype, device=device).requires_grad_())
b = torch.nn.Parameter(torch.randn((nr,), dtype=dtype, device=device).requires_grad_())
c = torch.randn((nr,), dtype=dtype, device=device).requires_grad_()
xl = torch.zeros((1,), dtype=dtype, device=device).requires_grad_()
xu = (torch.ones((1,), dtype=dtype, device=device) * 0.5).requires_grad_()
module = clss(a, b)
y = quad(module.forward, xl, xu, params=(c,), **fwd_options)
ytrue = (torch.sin(a * xu + b * c) - torch.sin(a * xl + b * c)) / a
assert torch.allclose(y, ytrue)
def getloss(a, b, c, xl, xu):
module = clss(a, b)
y = quad(module.forward, xl, xu, params=(c,), **fwd_options)
return y
gradcheck(getloss, (a, b, c, xl, xu))
gradgradcheck(getloss, (a, b, c, xl, xu))
# check if not all parameters require grad
gradcheck(getloss, (a, b.detach(), c, xl, xu))
@device_dtype_float_test(only64=True, additional_kwargs={
"clss": [IntegrationMultiModule, IntegrationNNMultiModule],
})
def test_quad_multi(dtype, device, clss):
torch.manual_seed(100)
random.seed(100)
nr = 4
fwd_options = {
"method": "leggauss",
"n": 100,
}
a = torch.nn.Parameter(torch.rand((nr,), dtype=dtype, device=device).requires_grad_())
b = torch.nn.Parameter(torch.randn((nr,), dtype=dtype, device=device).requires_grad_())
c = torch.randn((nr,), dtype=dtype, device=device).requires_grad_()
xl = torch.zeros((1,), dtype=dtype, device=device).requires_grad_()
xu = (torch.ones((1,), dtype=dtype, device=device) * 0.5).requires_grad_()
module = clss(a, b)
y = quad(module.forward, xl, xu, params=(c,), **fwd_options)
ytrue0 = (torch.sin(a * xu + b * c) - torch.sin(a * xl + b * c)) / a
ytrue1 = (-torch.cos(a * xu + b * c) + torch.cos(a * xl + b * c)) / a
assert len(y) == 2
assert torch.allclose(y[0], ytrue0)
assert torch.allclose(y[1], ytrue1)
@device_dtype_float_test(only64=True, additional_kwargs={
"totensor": [True, False]
})
def test_quad_inf(dtype, device, totensor):
torch.manual_seed(100)
random.seed(100)
nr = 4
fwd_options = {
"method": "leggauss",
"n": 100,
}
w = torch.nn.Parameter(torch.abs(torch.randn((nr,), dtype=dtype, device=device)).requires_grad_())
if totensor:
xl = torch.tensor(-float("inf"), dtype=dtype, device=device)
xu = torch.tensor(float("inf"), dtype=dtype, device=device)
else:
xl = -float("inf")
xu = float("inf")
def get_loss(w):
module = IntegrationInfModule(w)
y = quad(module.forward, xl, xu, params=[], **fwd_options)
return y
y = get_loss(w)
ytrue = w * np.sqrt(2 * np.pi)
assert torch.allclose(y, ytrue)
if totensor:
gradcheck(get_loss, (w,))
gradgradcheck(get_loss, (w,))
################################## ivp ##################################
class IVPNNModule(torch.nn.Module):
# dydt: -a * y * t - b * y - c * y
def __init__(self, a, b):
super(IVPNNModule, self).__init__()
self.a = a
self.b = b
def forward(self, t, y, c):
return -self.a * y * t - self.b * y - c * y
class IVPModule(xt.EditableModule):
# dydt: -a * y * t - b * y - c * y
def __init__(self, a, b):
self.a = a
self.b = b
def forward(self, t, y, c):
return -self.a * y * t - self.b * y - c * y
def getparamnames(self, methodname, prefix=""):
return [prefix + "a", prefix + "b"]
@device_dtype_float_test(only64=True, additional_kwargs={
"clss": [IVPModule, IVPNNModule],
})
def test_ivp(dtype, device, clss):
torch.manual_seed(100)
random.seed(100)
nr = 2
nt = 5
t0 = 0.0
t1 = 0.2
fwd_options = {
"method": "rk4",
}
a = torch.nn.Parameter(torch.rand((nr,), dtype=dtype, device=device).requires_grad_())
b = torch.nn.Parameter(torch.randn((nr,), dtype=dtype, device=device).requires_grad_())
c = torch.randn((nr,), dtype=dtype, device=device).requires_grad_()
ts = torch.linspace(t0, t1, nt, dtype=dtype, device=device).requires_grad_()
y0 = torch.rand((nr,), dtype=dtype, device=device).requires_grad_()
ts1 = ts.unsqueeze(-1)
def getoutput(a, b, c, ts, y0):
module = clss(a, b)
yt = solve_ivp(module.forward, ts, y0, params=(c,), **fwd_options)
return yt
yt = getoutput(a, b, c, ts, y0)
yt_true = y0 * torch.exp(-(0.5 * a * (ts1 + t0) + b + c) * (ts1 - t0))
assert torch.allclose(yt, yt_true)
gradcheck(getoutput, (a, b, c, ts, y0))
gradgradcheck(getoutput, (a, b, c, ts, y0))
@device_dtype_float_test(only64=True, additional_kwargs={
"method_tol": [
("rk4", (1e-8, 1e-5)),
("rk38", (1e-8, 1e-5)),
("rk45", (1e-8, 1e-5)),
("rk23", (1e-6, 1e-4)),
],
"clss": [IVPModule, IVPNNModule],
})
def test_ivp_methods(dtype, device, method_tol, clss):
torch.manual_seed(100)
random.seed(100)
nr = 2
nb = 3 # batch dimension
nt = 5
t0 = 0.0
t1 = 0.2
a = torch.nn.Parameter(torch.rand((nr,), dtype=dtype, device=device).requires_grad_())
b = torch.nn.Parameter(torch.randn((nr,), dtype=dtype, device=device).requires_grad_())
c = torch.randn((nr,), dtype=dtype, device=device).requires_grad_()
ts = torch.linspace(t0, t1, nt, dtype=dtype, device=device).requires_grad_()
y0 = torch.rand((nb, nr), dtype=dtype, device=device).requires_grad_()
ts1 = ts.unsqueeze(-1).unsqueeze(-1)
method, (rtol, atol) = method_tol
fwd_options = {
"method": method,
}
def getoutput(a, b, c, ts, y0):
module = clss(a, b)
yt = solve_ivp(module.forward, ts, y0, params=(c,), **fwd_options)
return yt
yt = getoutput(a, b, c, ts, y0)
yt_true = y0 * torch.exp(-(0.5 * a * (ts1 + t0) + b + c) * (ts1 - t0))
assert torch.allclose(yt, yt_true, rtol=rtol, atol=atol)
################################## mcquad ##################################
class MCQuadLogProbNNModule(torch.nn.Module):
def __init__(self, w):
super(MCQuadLogProbNNModule, self).__init__()
self.w = w
def forward(self, x):
# x, w are single-element tensors
return -x * x / (2 * self.w * self.w)
class MCQuadFcnModule(xt.EditableModule):
def __init__(self, a):
self.a = a
def forward(self, x):
# return self.a*self.a * x * x
return torch.exp(-x * x / (2 * self.a * self.a))
def getparamnames(self, methodname, prefix=""):
return [prefix + "a"]
def get_true_output(w, a):
# return a*a*w*w
return 1.0 / torch.sqrt(1 + w * w / (a * a))
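# Illustrative check (added note): for p(x) ~ N(0, w^2) and f(x) = exp(-x^2 / (2 a^2)),
# E[f] = integral of N(0, w^2)(x) * exp(-x^2 / (2 a^2)) dx = 1 / sqrt(1 + w^2 / a^2),
# which is exactly the value this helper returns.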
@device_dtype_float_test(only64=True, additional_kwargs={
"method": ["mh", "_dummy1d"],
})
def test_mcquad(dtype, device, method):
torch.manual_seed(100)
random.seed(100)
w = torch.nn.Parameter(torch.tensor(1.2, dtype=dtype, device=device))
a = torch.tensor(0.3, dtype=dtype, device=device).requires_grad_()
x0 = torch.tensor(0.0, dtype=dtype, device=device)
if method == "mh":
fwd_options = {
"method": "mh",
"step_size": 0.6,
"nsamples": 10000,
"nburnout": 2,
}
else:
# using deterministic forward method just to check the backward operation
fwd_options = {
"method": "_dummy1d",
"nsamples": 100,
"lb": -float("inf"),
"ub": float("inf"),
}
def getoutput(w, a, x0):
logp = MCQuadLogProbNNModule(w)
fcn = MCQuadFcnModule(a)
res = mcquad(fcn.forward, logp.forward, x0, fparams=[], pparams=[], **fwd_options)
return res
rtol = 2e-2 if method != "_dummy1d" else 1e-3
epf = getoutput(w, a, x0)
epf_true = get_true_output(w, a)
assert torch.allclose(epf, epf_true, rtol=rtol)
# skip gradient check if it is not the deterministic method
if method != "_dummy1d":
return
# manually check the gradient
g = torch.tensor(0.7, dtype=dtype, device=device).reshape(epf.shape).requires_grad_()
ga, gw = torch.autograd.grad(epf, (a, w), grad_outputs=g, create_graph=True)
# different implementation
ga2, gw2 = torch.autograd.grad(epf, (a, w), grad_outputs=g, retain_graph=True, create_graph=False)
ga_true, gw_true = torch.autograd.grad(epf_true, (a, w), grad_outputs=g, create_graph=True)
assert torch.allclose(gw, gw_true)
assert torch.allclose(ga, ga_true)
assert torch.allclose(gw2, gw_true)
assert torch.allclose(ga2, ga_true)
ggaw, ggaa, ggag = torch.autograd.grad(ga, (w, a, g), retain_graph=True, allow_unused=True)
ggaw_true, ggaa_true, ggag_true = torch.autograd.grad(ga_true, (w, a, g), retain_graph=True, allow_unused=True)
print("ggaw", ggaw, ggaw_true, (ggaw - ggaw_true) / ggaw_true)
print("ggaa", ggaa, ggaa_true, (ggaa - ggaa_true) / ggaa_true)
print("ggag", ggag, ggag_true, (ggag - ggag_true) / ggag_true)
assert torch.allclose(ggaa, ggaa_true)
assert torch.allclose(ggag, ggag_true)
ggww, ggwa, ggwg = torch.autograd.grad(gw, (w, a, g), allow_unused=True)
ggww_true, ggwa_true, ggwg_true = torch.autograd.grad(gw_true, (w, a, g), allow_unused=True)
print("ggwa", ggwa, ggwa_true, (ggwa - ggwa_true) / ggwa_true)
print("ggwg", ggwg, ggwg_true, (ggwg - ggwg_true) / ggwg_true)
print("ggww", ggww, ggww_true, (ggww - ggww_true) / ggww_true)
assert torch.allclose(ggwa, ggwa_true)
assert torch.allclose(ggwg, ggwg_true)
assert torch.allclose(ggww, ggww_true)
################################## SQuad ##################################
@device_dtype_float_test(only64=True, additional_kwargs={
"imethod": list(enumerate(["trapz", "cspline"])),
})
def test_squad(dtype, device, imethod):
x = torch.tensor([0.0, 1.0, 2.0, 4.0, 5.0, 7.0],
dtype=dtype, device=device).requires_grad_()
y = torch.tensor([[1.0, 2.0, 2.0, 1.5, 1.2, 4.0],
[0.0, 0.8, 1.0, 1.5, 2.0, 1.4]],
dtype=dtype, device=device).requires_grad_()
# true values
ycumsum_trapz = torch.tensor( # obtained by calculating manually
[[0.0, 1.5, 3.5, 7.0, 8.35, 13.55],
[0.0, 0.4, 1.3, 3.8, 5.55, 8.95]],
dtype=dtype, device=device)
ycspline_natural = torch.tensor( # obtained using scipy's CubicSpline and quad
[[0.0, 1.5639104372355428, 3.6221791255289135, 7.2068053596614945, 8.4994887166897, 13.11119534565217],
[0.0, 0.43834626234132584, 1.3733074753173484, 3.724083215796897, 5.494693230049832, 9.181717209378409]],
dtype=dtype, device=device)
i, method = imethod
option = [{}, {"bc_type": "natural"}][i]
ytrue = [ycumsum_trapz, ycspline_natural][i]
def getval(x, y, tpe):
quad = SQuad(x, method=method, **option)
if tpe == "cumsum":
return quad.cumsum(y, dim=-1)
else:
return quad.integrate(y, dim=-1)
# getparamnames
quad = SQuad(x, method=method, **option)
quad.assertparams(quad.cumsum, y, dim=-1)
quad.assertparams(quad.integrate, y, dim=-1)
# cumsum
ycumsum = getval(x, y, "cumsum")
assert torch.allclose(ycumsum, ytrue)
# integrate
yintegrate = getval(x, y, "integrate")
assert torch.allclose(yintegrate, ytrue[..., -1])
gradcheck(getval, (x, y, "cumsum"))
gradgradcheck(getval, (x, y, "cumsum"))
gradcheck(getval, (x, y, "integrate"))
gradgradcheck(getval, (x, y, "integrate"))
if __name__ == "__main__":
# with torch.autograd.detect_anomaly():
test_mcquad()
| 35.987342 | 116 | 0.578825 | 2,724 | 0.191629 | 0 | 0 | 10,673 | 0.750827 | 0 | 0 | 1,483 | 0.104326 |
6fa1472d7ffbd2b29874a36ce2b115c7eb0c4881 | 120 | py | Python | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
]
| null | null | null | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
]
| null | null | null | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
]
| null | null | null | from hub.dataload.nde import NDEFileSystemDumper
class FigshareDumper(NDEFileSystemDumper):
SRC_NAME = "figshare"
| 20 | 48 | 0.808333 | 68 | 0.566667 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.083333 |
6fa203b91e4061ab9a5aeb13af78a9c24d505f2c | 785 | py | Python | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
]
| 22 | 2019-01-13T12:56:56.000Z | 2020-11-03T01:39:20.000Z | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
]
| null | null | null | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
]
| 5 | 2019-04-01T09:19:55.000Z | 2020-05-26T14:38:06.000Z | # -*- coding: utf-8 -*-
"""
File Name: faiss_utils
   Description : faiss utility helpers
Author : mick.yi
date: 2019/1/4
"""
import faiss
import numpy as np
def get_index(dimension):
sub_index = faiss.IndexFlatL2(dimension)
index = faiss.IndexIDMap(sub_index)
return index
def update_multi(index, vectors, ids):
"""
:param index:
:param vectors:
:param ids:
:return:
    Note: non-C-contiguous input arrays raise ValueError: array is not C-contiguous
"""
idx = np.argsort(ids)
    # remove the ids first, then re-add them
index.remove_ids(ids[idx])
index.add_with_ids(vectors[idx], ids[idx])
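# Illustrative helper (added sketch, not part of the original module): faiss expects
# C-contiguous input arrays, so callers can normalize them up front, e.g.
def _ensure_c_contiguous(arr):
    # np.ascontiguousarray returns arr unchanged when it is already C-contiguous
    return np.ascontiguousarray(arr)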
def update_one(index, vector, label_id):
vectors = np.expand_dims(vector, axis=0)
ids = np.array([label_id])
update_multi(index, vectors, ids)
| 21.216216 | 47 | 0.602548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.387454 |
6fa21afd208bf7323dcf7c8f05508069120736b0 | 475 | py | Python | relogio.py | Glightman/project_jogo_POO | c5557871f7e4a2a264c03180581cb2a6b1dec1b9 | [
"MIT"
]
| 1 | 2021-05-29T23:43:36.000Z | 2021-05-29T23:43:36.000Z | relogio.py | Glightman/project_jogo_POO | c5557871f7e4a2a264c03180581cb2a6b1dec1b9 | [
"MIT"
]
| null | null | null | relogio.py | Glightman/project_jogo_POO | c5557871f7e4a2a264c03180581cb2a6b1dec1b9 | [
"MIT"
]
| 2 | 2021-06-01T01:36:01.000Z | 2021-06-01T01:36:59.000Z | class Relogio:
def __init__(self):
self.horas = 6
self.minutos = 0
self.dia = 1
def __str__(self):
return f"{self.horas:02d}:{self.minutos:02d} do dia {self.dia:02d}"
def avancaTempo(self, minutos):
self.minutos += minutos
while(self.minutos >= 60):
self.minutos -= 60
self.horas += 1
if self.horas >= 24:
self.horas = 0
self.dia +=1
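# Illustrative usage (added sketch, not part of the original file):
if __name__ == '__main__':
    r = Relogio()
    r.avancaTempo(125)  # 06:00 plus 2h05m
    print(r)            # expected output: "08:05 do dia 01"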
| 23.75 | 75 | 0.492632 | 472 | 0.993684 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.126316 |
6fa2447d022693958e9639f974d13fb89a57e078 | 3,201 | py | Python | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
]
| 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
]
| null | null | null | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
]
| null | null | null | import requests
from custom.api.utils import EndpointMixin
class MigrationException(Exception):
pass
class LogisticsEndpoint(EndpointMixin):
models_map = {}
def __init__(self, base_uri, username, password):
self.base_uri = base_uri.rstrip('/')
self.username = username
self.password = password
self.products_url = self._urlcombine(self.base_uri, '/products/')
self.webusers_url = self._urlcombine(self.base_uri, '/webusers/')
self.smsusers_url = self._urlcombine(self.base_uri, '/smsusers/')
self.locations_url = self._urlcombine(self.base_uri, '/locations/')
self.productstock_url = self._urlcombine(self.base_uri, '/productstocks/')
self.stocktransactions_url = self._urlcombine(self.base_uri, '/stocktransactions/')
def get_objects(self, url, params=None, filters=None, limit=1000, offset=0, **kwargs):
params = params if params else {}
if filters:
params.update(filters)
params.update({
'limit': limit,
'offset': offset
})
if 'next_url_params' in kwargs and kwargs['next_url_params']:
url = url + "?" + kwargs['next_url_params']
params = {}
response = requests.get(url, params=params,
auth=self._auth())
if response.status_code == 200 and 'objects' in response.json():
meta = response.json()['meta']
objects = response.json()['objects']
elif response.status_code == 401:
raise MigrationException('Invalid credentials.')
else:
raise MigrationException('Something went wrong during migration.')
return meta, objects
def get_products(self, **kwargs):
meta, products = self.get_objects(self.products_url, **kwargs)
for product in products:
yield (self.models_map['product'])(product)
def get_webusers(self, **kwargs):
meta, users = self.get_objects(self.webusers_url, **kwargs)
return meta, [(self.models_map['webuser'])(user) for user in users]
def get_smsusers(self, **kwargs):
meta, users = self.get_objects(self.smsusers_url, **kwargs)
return meta, [(self.models_map['smsuser'])(user) for user in users]
def get_location(self, id, params=None):
response = requests.get(self.locations_url + str(id) + "/", params=params, auth=self._auth())
return response.json()
def get_locations(self, **kwargs):
meta, locations = self.get_objects(self.locations_url, **kwargs)
return meta, [(self.models_map['location'])(location) for location in locations]
def get_productstocks(self, **kwargs):
meta, product_stocks = self.get_objects(self.productstock_url, **kwargs)
return meta, [(self.models_map['product_stock'])(product_stock) for product_stock in product_stocks]
def get_stocktransactions(self, **kwargs):
meta, stock_transactions = self.get_objects(self.stocktransactions_url, **kwargs)
return meta, [(self.models_map['stock_transaction'])(stock_transaction)
for stock_transaction in stock_transactions]
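# Illustrative usage (added sketch; the base URI and credentials are placeholders,
# and models_map must be populated with concrete model classes before fetching):
if __name__ == '__main__':
    endpoint = LogisticsEndpoint('https://example.org/api/v0_1', 'user', 'secret')
    print(endpoint.products_url)  # -> https://example.org/api/v0_1/products/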
| 41.038462 | 108 | 0.650109 | 3,136 | 0.979694 | 193 | 0.060294 | 0 | 0 | 0 | 0 | 319 | 0.099656 |
6fa2c35d5d796a2e58e703cd256e4f54f2acff9f | 432 | py | Python | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
]
| null | null | null | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
]
| 2 | 2021-06-08T20:56:16.000Z | 2021-09-08T01:41:42.000Z | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.5 on 2019-10-28 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20191028_1802'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='registered_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='date_registered'),
),
]
| 22.736842 | 90 | 0.62963 | 339 | 0.784722 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.277778 |
6fa38b8004759e97015cce47bf001559adb56f2e | 3,426 | py | Python | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
]
| null | null | null | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
]
| null | null | null | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions
from functionaltests.common import datagen
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.pool_client import PoolClient
class PoolTest(DesignateV2Test):
def _create_pool(self, pool_model, user='admin'):
resp, model = PoolClient.as_user(user).post_pool(pool_model)
self.assertEqual(resp.status, 201)
return resp, model
def test_list_pools(self):
self._create_pool(datagen.random_pool_data())
resp, model = PoolClient.as_user('admin').list_pools()
self.assertEqual(resp.status, 200)
self.assertGreater(len(model.pools), 0)
def test_create_pool(self):
self._create_pool(datagen.random_pool_data(), user='admin')
def test_update_pool(self):
post_model = datagen.random_pool_data()
resp, old_model = self._create_pool(post_model)
patch_model = datagen.random_pool_data()
resp, new_model = PoolClient.as_user('admin').patch_pool(
old_model.id, patch_model)
self.assertEqual(resp.status, 202)
resp, model = PoolClient.as_user('admin').get_pool(new_model.id)
self.assertEqual(resp.status, 200)
self.assertEqual(new_model.id, old_model.id)
self.assertEqual(new_model.name, patch_model.name)
def test_delete_pool(self):
resp, model = self._create_pool(datagen.random_pool_data())
resp, model = PoolClient.as_user('admin').delete_pool(model.id)
self.assertEqual(resp.status, 204)
def test_get_pool_404(self):
client = PoolClient.as_user('admin')
self._assert_exception(
exceptions.NotFound, 'pool_not_found', 404, client.get_pool,
str(uuid.uuid4()))
def test_update_pool_404(self):
model = datagen.random_pool_data()
client = PoolClient.as_user('admin')
self._assert_exception(
exceptions.NotFound, 'pool_not_found', 404, client.patch_pool,
str(uuid.uuid4()), model)
def test_delete_pool_404(self):
client = PoolClient.as_user('admin')
self._assert_exception(
exceptions.NotFound, 'pool_not_found', 404, client.delete_pool,
str(uuid.uuid4()))
def test_get_pool_invalid_uuid(self):
client = PoolClient.as_user('admin')
self._assert_invalid_uuid(client.get_pool, 'fooo')
def test_update_pool_invalid_uuid(self):
model = datagen.random_pool_data()
client = PoolClient.as_user('admin')
self._assert_invalid_uuid(client.patch_pool, 'fooo', model)
def test_delete_pool_invalid_uuid(self):
client = PoolClient.as_user('admin')
self._assert_invalid_uuid(client.get_pool, 'fooo')
| 36.83871 | 75 | 0.700525 | 2,553 | 0.745184 | 0 | 0 | 0 | 0 | 0 | 0 | 790 | 0.23059 |
6fa4cb77b9686bd974f4ba0799278420d18f452c | 1,928 | py | Python | fewshot/models/basic_model_VAT_ENT.py | AhmedAyad89/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
]
| 22 | 2019-03-13T02:19:17.000Z | 2021-08-06T03:13:00.000Z | fewshot/models/basic_model_VAT_ENT.py | mattochal/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
]
| 1 | 2019-07-27T14:33:02.000Z | 2020-06-01T11:03:20.000Z | fewshot/models/basic_model_VAT_ENT.py | mattochal/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
]
| 5 | 2019-03-07T06:18:51.000Z | 2019-10-22T05:33:23.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.kmeans_utils import compute_logits
from fewshot.models.model import Model
from fewshot.models.refine_model import RefineModel
from fewshot.models.basic_model_VAT import BasicModelVAT
from fewshot.models.model_factory import RegisterModel
from fewshot.models.nnlib import (concat, weight_variable)
from fewshot.utils import logger
from fewshot.utils.debug import debug_identity
from fewshot.models.SSL_utils import *
l2_norm = lambda t: tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))
log = logger.get()
@RegisterModel("basic-VAT-ENT")
class BasicModelVAT_ENT(BasicModelVAT):
def get_train_op(self, logits, y_test):
loss, train_op = BasicModelVAT.get_train_op(self, logits, y_test)
config = self.config
ENT_weight = config.ENT_weight
VAT_ENT_step_size = config.VAT_ENT_step_size
logits = self._unlabel_logits
s = tf.shape(logits)
s = s[0]
p = tf.stop_gradient(self.h_unlabel)
affinity_matrix = compute_logits(p, p) - (tf.eye(s, dtype=tf.float32) * 1000.0)
# logits = tf.Print(logits, [tf.shape(point_logits)])
ENT_loss = walking_penalty(logits, affinity_matrix)
loss += ENT_weight * ENT_loss
ENT_opt = tf.train.AdamOptimizer(VAT_ENT_step_size * self.learn_rate, name="Entropy-optimizer")
ENT_grads_and_vars = ENT_opt.compute_gradients(loss)
train_op = ENT_opt.apply_gradients(ENT_grads_and_vars)
for gradient, variable in ENT_grads_and_vars:
if gradient is None:
gradient = tf.constant(0.0)
self.adv_summaries.append(tf.summary.scalar("ENT/gradients/" + variable.name, l2_norm(gradient), family="Grads"))
self.adv_summaries.append(tf.summary.histogram("ENT/gradients/" + variable.name, gradient, family="Grads"))
self.summaries.append(tf.summary.scalar('entropy loss', ENT_loss))
return loss, train_op
| 33.824561 | 116 | 0.769191 | 1,222 | 0.633817 | 0 | 0 | 1,254 | 0.650415 | 0 | 0 | 147 | 0.076245 |
6fa4dc608c43d4d875d4dbb6404a617e1898a215 | 169 | py | Python | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
]
| 1 | 2020-07-15T10:02:40.000Z | 2020-07-15T10:02:40.000Z | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
]
| null | null | null | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
]
| null | null | null | """
@Author : xiaotao
@Email : [email protected]
@Last modified : 2020/4/24 10:02
@Filename : __init__.py.py
@Description :
@Software : PyCharm
""" | 21.125 | 32 | 0.609467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 1 |
6fa61d901e4c25cb458862fd5a69f1d44a079c88 | 3,864 | py | Python | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
]
| null | null | null | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
]
| null | null | null | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
]
| null | null | null | import numpy as np
import re as re
from molsysmt._private_tools.lists_and_tuples import is_list_or_tuple
def compatibles_for_a_single_molecular_system(items):
from molsysmt.basic.get_form import get_form
from molsysmt.basic.get import get
from molsysmt.forms import dict_has
output = True
if not is_list_or_tuple(items):
items=[items]
if len(items)>1:
list_n_atoms = []
list_n_groups = []
list_forms = []
for item in items:
tmp_form = get_form(item)
tmp_n_atoms, tmp_n_groups = get(item, target='atom', n_atoms=True, n_groups=True)
list_forms.append(tmp_form)
list_n_atoms.append(tmp_n_atoms)
list_n_groups.append(tmp_n_groups)
Not_none_values = filter(None.__ne__, list_n_atoms)
set_n_atoms = set(Not_none_values)
if len(set_n_atoms)>1:
output = False
if output:
Not_none_values = filter(None.__ne__, list_n_groups)
set_n_groups = set(Not_none_values)
if len(set_n_groups)>1:
output = False
return output
def has_topology(items):
from molsysmt.basic import get_form
from molsysmt.basic import dict_get
if type(items) in [list, tuple]:
output = []
for item in items:
form_in = get_form(item)
w_topology = dict_get[form_in]["system"]["has_topology"](item)
output.append(w_topology)
else:
form_in = get_form(items)
output = dict_get[form_in]["system"]["has_topology"](items)
return output
def has_trajectory(items):
from molsysmt.basic import get_form
from molsysmt.basic import dict_get
if type(items) in [list, tuple]:
output = []
for item in items:
form_in = get_form(item)
w_trajectory = dict_get[form_in]["system"]["has_trajectory"](item)
output.append(w_trajectory)
else:
form_in = get_form(items)
output = dict_get[form_in]["system"]["has_trajectory"](items)
return output
def has_coordinates(items):
from molsysmt.basic import get_form
from molsysmt.basic import dict_get
if type(items) in [list, tuple]:
output = []
for item in items:
form_in = get_form(item)
w_coordinates = dict_get[form_in]["system"]["has_coordinates"](item)
output.append(w_coordinates)
else:
form_in = get_form(items)
output = dict_get[form_in]["system"]["has_coordinates"](items)
return output
def has_box(items):
from molsysmt.basic import get_form
from molsysmt.basic import dict_get
if type(items) in [list, tuple]:
output = []
for item in items:
form_in = get_form(item)
w_box = dict_get[form_in]["system"]["has_box"](item)
output.append(w_box)
else:
form_in = get_form(items)
output = dict_get[form_in]["system"]["has_box"](items)
return output
def item_is_file(item):
from molsysmt.forms import file_extensions_recognized
output = False
if type(item) is str:
file_extension = item.split('.')[-1].lower()
if file_extension in file_extensions_recognized:
output = 'file:'+file_extension
return output
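# Illustrative behavior (added note; assumes 'pdb' is among the recognized extensions):
#   item_is_file('structure.pdb')      -> 'file:pdb'
#   item_is_file(['not', 'a', 'path']) -> False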
def item_is_string(item):
from molsysmt.forms import string_names_recognized
from .strings import guess_form_from_string
output = False
if type(item) is str:
if ':' in item:
string_name = item.split(':')[0]
if string_name in string_names_recognized:
output = 'string:'+string_name
if output==False:
output = guess_form_from_string(item)
if output is None:
output = False
return output
return output
| 26.465753 | 93 | 0.626812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.053571 |
6fa63eda0afd91d2591e9bd285c10c6a046d2252 | 5,021 | py | Python | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
]
| null | null | null | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
]
| null | null | null | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
]
| 1 | 2021-05-11T05:09:30.000Z | 2021-05-11T05:09:30.000Z | #!/usr/bin/env python3
import argparse
import subprocess as sp
import select
import sys
import time
import yaml
if __name__ == '__main__':
"""
We can daemonize our connections to our remote machines, list the FL processes on remote
    machines, or kill FL processes on remote machines. We can either pass a specific run's metadata
file, or we can use a 'global' metadata file to list all processes on a list of machines.
"""
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['daemonize', 'list', 'kill'])
parser.add_argument('--config')
# read metadata config for the specified run
args = parser.parse_args()
if (args.config):
with open(args.config) as config_file:
config = yaml.load(config_file.read(), Loader=yaml.Loader)
if 'timestamp' in config:
machines = [config['agg_machine']] + config['party_machines']
usernames = [config['agg_username']] + config['party_usernames']
run_id = config['timestamp'] if 'timestamp' in config else ''
else:
machines = config['machines']
usernames = config['usernames']
run_id = ''
localp = sp.Popen('mkdir -p {}/.ssh'.format(config['local_staging_dir']).split())
exit_code = localp.wait()
# decide what to run based on input
if args.action == 'daemonize':
daemonize_cmd = 'ssh '\
'-o "ControlMaster=auto" '\
'-o "ControlPath={}/.ssh/master-%r@%h:%p" '\
'-o "ControlPersist=yes" '\
'-Nn {}@{}'
cmds = [daemonize_cmd.format(config['local_staging_dir'], u, m) for m, u in zip(machines,usernames)]
elif args.action == 'list':
if 'timestamp' in config:
list_cmd = \
'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
'"set -o pipefail; '\
'pgrep -u {} -f \\"bash.*run_agg\.py.*{}|bash.*run_party\.py.*{}\\" '\
'| xargs --no-run-if-empty -I@ pgrep -P @ -f \\"run\\" -a"'
cmds = [list_cmd.format(config['local_staging_dir'], u, m, u, run_id, run_id, u, run_id, run_id) for m, u in zip(machines, usernames)]
else:
list_cmd = \
'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} ' \
'"set -o pipefail; '\
'pgrep -f \\"bash.*run_agg\.py|bash.*run_party\.py\\" '\
'| tee >(xargs --no-run-if-empty -I@ pgrep -P @) '\
'| xargs --no-run-if-empty ps -o user:8,pid,ppid,cmd p"'
cmds = [list_cmd.format(config['local_staging_dir'], u, m) for m, u in zip(machines, usernames)]
elif args.action == 'kill':
if 'timestamp' in config:
kill_cmd = \
'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
'"set -o pipefail; '\
'pgrep -u {} -f \\"bash.*run_agg\.py.*{}|run_party\.py.*{}\\" '\
'| tee >(xargs --no-run-if-empty pgrep -P) | tee >(xargs --no-run-if-empty kill)"'
cmds = [kill_cmd.format(config['local_staging_dir'], u, m, u, run_id, run_id, u, run_id, run_id) for m, u in zip(machines, usernames)]
else:
kill_cmd = \
'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
'"set -o pipefail; '\
'pgrep -u {} -f \\"run_agg\.py|run_party\.py\\" '\
'&& pkill -u {} -f \\"run_agg\.py|run_party\.py\\""'
cmds = [kill_cmd.format(config['local_staging_dir'], u, m, u, u) for m, u in zip(machines, usernames)]
else:
print('Action not handled. Exiting.')
exit(1)
# start all processes
procs = [sp.Popen(c, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, universal_newlines=True) for c in cmds]
stdout = ['' for _ in machines]
stderr = ['' for _ in machines]
loops = 0
# wait for output and finally exit when processes end, obtaining all output
polls = list(p.poll() for p in procs)
while any(r == None for r in polls):
ret = select.select([p.stdout.fileno() for p,r in zip(procs,polls) if r == None], [], [])
for fd in ret[0]:
for i,p in enumerate(procs):
if p.stdout.fileno() == fd:
stdout[i] += '\t{}'.format(p.stdout.readline())
polls = tuple(p.poll() for p in procs)
loops += 1
for i,p in enumerate(procs):
for line in p.stdout:
stdout[i] += '\t{}'.format(line)
for line in p.stderr:
stderr[i] += '\t{}'.format(line)
if not stdout[i].strip():
stderr[i] += '\tNo processes found.\n'
# print output
if args.action != 'daemonize':
for i,m in enumerate(machines):
print("{}:".format(m))
if stdout[i].strip():
print(stdout[i])
if stderr[i].strip():
print(stderr[i])
| 44.04386 | 146 | 0.54292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,974 | 0.393149 |
6fa6d9568fc3d20a8fc6e092cfbe18e8adeb1122 | 134 | py | Python | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
]
| null | null | null | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
]
| null | null | null | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
]
| null | null | null | from .device import SampleDevice
from .codeobject import SampleDeviceCodeObject
__all__ = ['SampleDevice', 'SampleDeviceCodeObject']
| 26.8 | 52 | 0.828358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.283582 |
6fa6de489d3ecbdc05135c1a882460c438344d63 | 149 | py | Python | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
]
| null | null | null | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
]
| 20 | 2021-12-16T13:23:07.000Z | 2022-03-31T16:40:02.000Z | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
]
| null | null | null | from beaker import Beaker
def test_node_get(client: Beaker, beaker_node_id: str):
assert client.node.get(beaker_node_id).limits.gpu_count == 8
| 24.833333 | 64 | 0.778523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6fa73ff97a0db35ad4fb506ba2a01779a69e92b2 | 5,984 | py | Python | crawling/data_crawler_set.py | CLUG-kr/cau_hashkeyword | f2b747ce8c5705ea58fd94d2a1d10110d6f4f511 | [
"MIT"
]
| 5 | 2019-01-20T06:12:47.000Z | 2019-06-27T13:29:26.000Z | crawling/data_crawler_set.py | AllyHyeseongKim/cau_hashkeyword | 1050dbc74d28765aec6e5870d6f3dae325f5caad | [
"MIT"
]
| 4 | 2019-01-10T14:58:45.000Z | 2019-05-16T05:02:53.000Z | crawling/data_crawler_set.py | AllyHyeseongKim/cau_hashkeyword | 1050dbc74d28765aec6e5870d6f3dae325f5caad | [
"MIT"
]
| 1 | 2019-02-20T09:11:54.000Z | 2019-02-20T09:11:54.000Z |
# coding: utf-8
# In[2]:
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
# Fetch the service account key JSON file contents
cred = credentials.Certificate('/Users/Solomon/Desktop/cau-hashkeyword-serviceAccountKey.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://cau-hashkeyword.firebaseio.com'
})
ref = db.reference('server/saving-data/crawling')
# In[3]:
from bs4 import BeautifulSoup
from urllib.request import urlopen
from selenium import webdriver
import re
# In[3]:
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x1080')
options.add_argument('disable-gpu')
# In[10]:
cau_title_list = []
cau_date_list = []
cau_url_list = []
driver = webdriver.Chrome("/usr/local/bin/chromedriver", chrome_options=options)
driver.get("https://www.cau.ac.kr/cms/FR_CON/index.do?MENU_ID=100")
driver.implicitly_wait(3)
cau_base_url = "https://www.cau.ac.kr/cms/FR_CON/BoardView.do?MENU_ID=100&CONTENTS_NO=1&SITE_NO=2&P_TAB_NO=&TAB_NO=&BOARD_SEQ=4&BOARD_CATEGORY_NO=&BBS_SEQ="
# BBS_SEQ=19642 (id=board_19642)
board_list = driver.find_element_by_id("tbody").find_elements_by_tag_name("li")
board_list.reverse()
# count = 0
for item in board_list:
# if count < 10: pass # for testing
# else:
cau_title_list.append(item.find_element_by_class_name("txtL").find_element_by_tag_name('a').text)
cau_date_list.append(item.find_element_by_class_name("txtInfo").find_element_by_class_name("date").text)
cau_url_list.append(cau_base_url + item.get_attribute("id").replace("board_",""))
# count += 1
driver.close()
# Prepending to a list with insert(0, data) is O(n) per element.
# Using collections.deque([iterable[, maxlen]]) with appendleft() is O(1);
# prefer it when high performance is needed.
# Alternatively, reverse the list once and keep using append (the reverse itself is O(n)).
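# Illustrative sketch (added for clarity, not part of the crawl logic):
#   from collections import deque
#   d = deque([2, 3]); d.appendleft(1)   # O(1) prepend -> deque([1, 2, 3])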
# In[11]:
lib_title_list = []
lib_date_list = []
lib_url_list = []
driver = webdriver.Chrome("/usr/local/bin/chromedriver", chrome_options=options)
driver.get("https://library.cau.ac.kr/#/bbs/notice?offset=0&max=20")
driver.implicitly_wait(3)
try:
    # tbody[0] is the gray pinned-notice block at the top, which appears to repeat only the important notices from below, i.e. overlapping content.
board_list = driver.find_elements_by_tag_name("tbody")[1].find_elements_by_class_name("ikc-item")
board_list.reverse()
    for item in board_list: # searching for ikc-item under tbody occasionally raises IndexError: list index out of range (cause unknown)
        lib_title_list.append(item.find_elements_by_tag_name("td")[2].find_element_by_tag_name('a').text) # alternative lookup
lib_date_list.append(item.find_elements_by_tag_name("td")[3].find_elements_by_tag_name("span")[1].text)
except IndexError:
print("IndexError")
pass
lib_base_url = "https://library.cau.ac.kr/#/bbs/notice/"
# the post id is inserted here between base and sub url
lib_sub_url = "?offset=0&max=20"
# how should we obtain the url id..
driver.close()
# In[14]:
# fetch only the yellow-highlighted notice rows
dorm_title_list = []
dorm_date_list = []
dorm_url_list = []
dormnotice_url = "https://dormitory.cau.ac.kr/bbs/bbs_list.php?bbsID=notice"
dormnotice_page = urlopen(dormnotice_url)
dormnotice_soup = BeautifulSoup(dormnotice_page, "lxml")
dormnotice_list = dormnotice_soup.find(id='content').find('div').find_all('tr',{'bgcolor':'#fffcdb'})
dormnotice_list.reverse()
if dormnotice_list == []:
print("No data")
else :
for item in dormnotice_list:
dorm_title_list.append(item.find('span',class_='bbsTitle').get_text())
dorm_url_list.append(item.find('a')['href'])
dorm_date_list.append("20" + item.find_all('td',class_='t_c')[3].get_text())
# apply a try-except here?
# In[13]:
ict_title_list = []
ict_date_list = []
ict_url_list = []
ictnotice_url = "http://ict.cau.ac.kr/20150610/sub05/sub05_01_list.php"
ictnotice_page = urlopen(ictnotice_url)
ictnotice_soup = BeautifulSoup(ictnotice_page, "lxml")
ict_base_url = "http://ict.cau.ac.kr/20150610/sub05/sub05_01_list.php?cmd=view&cpage=1&idx="
# the post id is inserted here between base and sub url
ict_sub_url = "&search_gbn=1&search_keyword="
ictnotice_list = ictnotice_soup.find('tbody').find_all('tr')
ictnotice_list.reverse()
if ictnotice_list == []:
print("No data")
else:
for item in ictnotice_list:
ict_title_list.append(item.find('td',class_='cont').find('a').get_text())
ict_url_list.append(ict_base_url + item.find('td',class_='cont').find('a')['href'][-7:-3] + ict_sub_url)
ict_date_list.append(item.find_all('td')[2].get_text())
# In[10]:
# also collect titles of posts marked as notices? (overlapping content)
cse_title_list = []
cse_date_list = []
cse_url_list = []
csenotice_url = "http://cse.cau.ac.kr/20141201/sub05/sub0501.php"
csenotice_page = urlopen(csenotice_url)
csenotice_soup = BeautifulSoup(csenotice_page, "lxml")
csenotice_list = csenotice_soup.find('table',class_='nlist').find_all('tr')
csenotice_list.reverse()
if csenotice_list == []:
print("No data")
else:
for item in csenotice_list:
if item.find('td').get_text() != '':
            cse_title_list.append(re.sub('[\n\t\xa0]','',item.find('a').get_text())) # review how re.sub is used here
cse_url_list.append(csenotice_url + item.find_all('td')[2].find('a')['href'])
cse_date_list.append(item.find_all('td')[4].get_text())
# In[15]:
# Save the crawled data to Firebase
import json
from collections import OrderedDict
crawling_data = OrderedDict()
crawling_data['caunotice'] = {'title':cau_title_list, 'date':cau_date_list, 'url':cau_url_list}
crawling_data['library'] = {'title':lib_title_list, 'date':lib_date_list, 'url':"https://library.cau.ac.kr/#/bbs/notice?offset=0&max=20"}
crawling_data['dorm'] = {'title':dorm_title_list, 'date':dorm_date_list, 'url':dorm_url_list}
crawling_data['ict'] = {'title':ict_title_list, 'date':ict_date_list, 'url':ict_url_list}
crawling_data['cse'] = {'title':cse_title_list, 'date':cse_date_list, 'url':cse_url_list}
crawling_json = json.dumps(crawling_data, ensure_ascii=False, indent="\t")
webpage_ref = ref.child('webpages')
webpage_ref.set(json.loads(crawling_json))
| 30.070352 | 156 | 0.723429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,482 | 0.392349 |
6fa85d4b0b5bfa6ac386b4e088bb46a5cbd9b94a | 614 | py | Python | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
]
| null | null | null | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
]
| null | null | null | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
]
| null | null | null | #coding: utf-8
from aip import AipSpeech
from config import DefaultConfig as opt
class composer():
def __init__(self):
pass
def compose(self,text ='你好'):
        # secret keys obtained from the Baidu developer console
APP_ID = opt.baidu_app_id
API_KEY = opt.baidu_api_key
        SECRET_KEY = opt.baidu_secret_key
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
result = client.synthesis(text,'zh',1,{
'vol':5,})
file_name = 'ans.mp3'
if not isinstance(result, dict):
with open(file_name, 'wb') as f:
f.write(result)
return file_name
| 26.695652 | 55 | 0.583062 | 551 | 0.866352 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.116352 |
6fa87cb16c669518081acc3c339dbfde1687dc05 | 8,754 | py | Python | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
]
| 1 | 2020-11-20T17:17:50.000Z | 2020-11-20T17:17:50.000Z | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
]
| 12 | 2020-11-19T05:22:13.000Z | 2020-12-15T03:50:33.000Z | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
]
| null | null | null | import unittest
import os
import pathlib
import h5py
from desc.input_reader import InputReader
from desc.equilibrium_io import hdf5Writer, hdf5Reader
from desc.configuration import Configuration, Equilibrium
#from desc.input_output import read_input
#class TestIO(unittest.TestCase):
# """tests for input/output functions"""
#
# def test_min_input(self):
# dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, 'MIN_INPUT')
# inputs = read_input(filename)
#
# self.assertEqual(len(inputs), 26)
class TestInputReader(unittest.TestCase):
def setUp(self):
self.argv0 = []
self.argv1 = ['nonexistant_input_file']
self.argv2 = ['./tests/MIN_INPUT']
def test_no_input_file(self):
with self.assertRaises(NameError):
ir = InputReader(cl_args=self.argv0)
def test_nonexistant_input_file(self):
with self.assertRaises(FileNotFoundError):
ir = InputReader(cl_args=self.argv1)
def test_min_input(self):
ir = InputReader(cl_args=self.argv2)
#self.assertEqual(ir.args.prog, 'DESC', 'Program is incorrect.')
self.assertEqual(ir.args.input_file[0], self.argv2[0],
'Input file name does not match')
#self.assertEqual(ir.output_path, self.argv2[0] + '.output',
# 'Default output file does not match.')
self.assertEqual(ir.input_path,
str(pathlib.Path('./'+self.argv2[0]).resolve()),
'Path to input file is incorrect.')
#Test defaults
self.assertFalse(ir.args.plot, 'plot is not default False')
self.assertFalse(ir.args.quiet, 'quiet is not default False')
self.assertFalse(ir.args.verbose, 'verbose is not default False')
#self.assertEqual(ir.args.vmec_path, '', "vmec path is not default ''")
#self.assertFalse(ir.args.gpuID, 'gpu argument was given')
self.assertFalse(ir.args.numpy, 'numpy is not default False')
self.assertEqual(os.environ['DESC_USE_NUMPY'], '', 'numpy environment '
'variable incorrect with default argument')
self.assertFalse(ir.args.version, 'version is not default False')
self.assertEqual(len(ir.inputs), 28, 'number of inputs does not match '
'number expected in MIN_INPUT')
# test equality of arguments
def test_np_environ(self):
argv = self.argv2 + ['--numpy']
ir = InputReader(cl_args=argv)
self.assertEqual(os.environ['DESC_USE_NUMPY'], 'True', 'numpy '
'environment variable incorrect on use')
def test_quiet_verbose(self):
ir = InputReader(self.argv2)
self.assertEqual(ir.inputs['verbose'], 1, "value of inputs['verbose'] "
"incorrect on no arguments")
argv = self.argv2 + ['-v']
ir = InputReader(argv)
self.assertEqual(ir.inputs['verbose'], 2, "value of inputs['verbose'] "
"incorrect on verbose argument")
argv.append('-q')
ir = InputReader(argv)
self.assertEqual(ir.inputs['verbose'], 0, "value of inputs['verbose'] "
"incorrect on quiet argument")
def test_vmec_to_desc_input(self):
pass
class MockObject:
def __init__(self):
self._save_attrs_ = ['a', 'b', 'c']
class Testhdf5Writer(unittest.TestCase):
def setUp(self):
self.filename = 'writer_test_file'
self.file_mode = 'w'
def test_given_filename(self):
writer = hdf5Writer(self.filename, self.file_mode)
self.assertFalse(writer.check_type(writer.target))
self.assertTrue(writer.check_type(writer.base))
self.assertTrue(writer._close_base_)
writer.close()
self.assertFalse(writer._close_base_)
def test_given_file(self):
f = h5py.File(self.filename, self.file_mode)
writer = hdf5Writer(f, self.file_mode)
self.assertTrue(writer.check_type(writer.target))
self.assertTrue(writer.check_type(writer.base))
self.assertFalse(writer._close_base_)
#with self.assertWarns(RuntimeWarning):
# writer.close()
self.assertFalse(writer._close_base_)
f.close()
def test_close_on_delete(self):
writer = hdf5Writer(self.filename, self.file_mode)
with self.assertRaises(OSError):
newwriter = hdf5Writer(self.filename, self.file_mode)
del writer
newwriter = hdf5Writer(self.filename, self.file_mode)
del newwriter
def test_write_dict(self):
thedict = {'1':1, '2':2, '3':3}
writer = hdf5Writer(self.filename, self.file_mode)
writer.write_dict(thedict)
writer.write_dict(thedict, where=writer.sub('subgroup'))
with self.assertRaises(SyntaxError):
writer.write_dict(thedict, where='not a writable type')
writer.close()
f = h5py.File(self.filename, 'r')
g = f['subgroup']
for key in thedict.keys():
self.assertTrue(key in f.keys())
self.assertTrue(key in g.keys())
f.close()
def test_write_obj(self):
mo = MockObject()
writer = hdf5Writer(self.filename, self.file_mode)
#writer should throw runtime warning if any save_attrs are undefined
with self.assertWarns(RuntimeWarning):
writer.write_obj(mo)
writer.close()
writer = hdf5Writer(self.filename, self.file_mode)
for name in mo._save_attrs_:
setattr(mo, name, name)
writer.write_obj(mo)
groupname = 'initial'
writer.write_obj(mo, where=writer.sub(groupname))
writer.close()
f = h5py.File(self.filename, 'r')
for key in mo._save_attrs_:
self.assertTrue(key in f.keys())
self.assertTrue(groupname in f.keys())
initial = f[groupname]
for key in mo._save_attrs_:
self.assertTrue(key in initial.keys())
f.close()
class Testhdf5Reader(unittest.TestCase):
def setUp(self):
self.filename = 'reader_test_file'
self.file_mode = 'r'
self.thedict = {'a':'a', 'b':'b', 'c':'c'}
f = h5py.File(self.filename, 'w')
self.subgroup = 'subgroup'
g = f.create_group(self.subgroup)
for key in self.thedict.keys():
f.create_dataset(key, data=self.thedict[key])
g.create_dataset(key, data=self.thedict[key])
f.close()
def test_given_filename(self):
reader = hdf5Reader(self.filename)
self.assertFalse(reader.check_type(reader.target))
self.assertTrue(reader.check_type(reader.base))
self.assertTrue(reader._close_base_)
reader.close()
self.assertFalse(reader._close_base_)
def test_given_file(self):
f = h5py.File(self.filename, self.file_mode)
reader = hdf5Reader(f)
self.assertTrue(reader.check_type(reader.target))
self.assertTrue(reader.check_type(reader.base))
self.assertFalse(reader._close_base_)
#with self.assertWarns(RuntimeWarning):
# reader.close()
self.assertFalse(reader._close_base_)
f.close()
#def test_close_on_delete(self):
# reader = hdf5Reader(self.filename)
# with self.assertRaises(OSError):
# newreader = hdf5Reader(self.filename)
# del reader
# newreader = hdf5Reader(self.filename)
# del newreader
def test_read_dict(self):
reader = hdf5Reader(self.filename)
newdict = {}
newsubdict = {}
otherdict = {}
reader.read_dict(newdict)
reader.read_dict(newsubdict, where=reader.sub(self.subgroup))
with self.assertRaises(SyntaxError):
reader.read_dict(otherdict, where='not a readable type')
reader.close()
if type(newdict['a']) is bytes:
for key in newdict.keys():
newdict[key] = newdict[key].decode('ascii')
for key in newsubdict.keys():
newsubdict[key] = newsubdict[key].decode('ascii')
self.assertTrue(self.thedict == newdict)
self.assertTrue(self.thedict == newsubdict)
def test_read_obj(self):
mo = MockObject()
reader = hdf5Reader(self.filename)
reader.read_obj(mo)
mo._save_attrs_ += '4'
with self.assertWarns(RuntimeWarning):
reader.read_obj(mo)
del mo._save_attrs_[-1]
submo = MockObject()
reader.read_obj(submo, where=reader.sub(self.subgroup))
for key in mo._save_attrs_:
self.assertTrue(hasattr(mo, key))
self.assertTrue(hasattr(submo, key))
def test_load_configuration(self):
pass
def test_load_equilibrium(self):
pass
| 37.09322 | 79 | 0.629312 | 8,199 | 0.9366 | 0 | 0 | 0 | 0 | 0 | 0 | 1,957 | 0.223555 |
6fa98703634598fbfefb544679b794421049f04d | 880 | py | Python | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
]
| 4 | 2018-03-29T01:51:37.000Z | 2019-09-11T14:07:34.000Z | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
]
| 101 | 2018-03-20T16:58:20.000Z | 2019-11-25T19:26:52.000Z | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
]
| 1 | 2018-10-22T20:50:25.000Z | 2018-10-22T20:50:25.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
import pytest
# Declare Fixtures
# --------------------------------------------------------------------
@pytest.fixture()
def driver():
driver = webdriver.Chrome()
driver.implicitly_wait(5)
driver.get("localhost:5000")
yield driver
driver.close()
# End to End testing with Selenium
# --------------------------------------------------------------------
@pytest.mark.selenium
def test_server_running(driver):
assert "vCDAT" in driver.title
@pytest.mark.selenium
def test_variable_loads(driver):
app = driver.find_element_by_xpath("//div[@id='app']")
print("xxx found app xxx")
app_container = driver.find_element_by_xpath("//div[@id='app-container']")
print("xxx found app_container xxx")
| 26.666667 | 78 | 0.621591 | 0 | 0 | 144 | 0.163636 | 515 | 0.585227 | 0 | 0 | 309 | 0.351136 |
6fa9d472e775eb87721d162cdd4f797206aefbc8 | 264 | py | Python | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
]
| 3 | 2017-01-05T20:02:23.000Z | 2017-10-02T19:55:58.000Z | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
]
| 1 | 2016-05-17T20:20:19.000Z | 2016-05-17T20:20:28.000Z | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
]
| null | null | null | #makes a toast with the given string ID
from sys import argv
def make_toast (string_id):
return "Toast.makeText(getBaseContext(), getString(R.string." + string_id + "), Toast.LENGTH_SHORT).show();"
if ( argv[0] == "makeToast.py" ):
    print(make_toast(argv[1]))
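# Example (illustrative): make_toast("app_name") returns
#   Toast.makeText(getBaseContext(), getString(R.string.app_name), Toast.LENGTH_SHORT).show();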
| 24 | 109 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.526515 |
6fab9608b18da127d6b2008d803781b981e8468d | 334 | py | Python | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
]
| null | null | null | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
]
| null | null | null | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-22 23:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calls', '0010_auto_20180119_2117'),
('calls', '0007_auto_20180122_2157'),
]
operations = [
]
| 19.647059 | 48 | 0.658683 | 184 | 0.550898 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.404192 |
6faccc1a6a5c598d0d380be6821bb6343c6e2509 | 572 | py | Python | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
]
| 5 | 2015-01-12T10:04:15.000Z | 2018-12-20T13:49:47.000Z | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
]
| 16 | 2015-10-06T12:35:20.000Z | 2019-04-01T15:05:17.000Z | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
]
| 2 | 2015-03-02T09:49:11.000Z | 2019-03-26T15:27:41.000Z | from unittest import skip
import unittest2
from nose.plugins.attrib import attr
from nose.tools import assert_equals
@attr('test_nose_plugin')
class TestNosePlugin(unittest2.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
"""first test, simulation passing test"""
assert_equals(1, 1)
def test_one6(self):
"""first test, simulation passing test"""
assert_equals(1, 1)
def test_three(self):
"""third test, simulation failing test"""
assert_equals(1, 1)
| 21.185185 | 49 | 0.653846 | 425 | 0.743007 | 0 | 0 | 451 | 0.788462 | 0 | 0 | 141 | 0.246503 |
6fad4b2d42f41ae40846094b93b5343afe778c0f | 2,359 | py | Python | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
]
| 32 | 2020-05-21T09:19:52.000Z | 2022-03-31T17:05:59.000Z | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
]
| 417 | 2020-05-11T14:06:02.000Z | 2022-03-31T16:15:59.000Z | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
]
| 44 | 2020-05-27T09:33:42.000Z | 2022-03-30T14:34:03.000Z | # Copyright 2020-2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Charmcraft manifest.yaml related functionality."""
import datetime
import logging
import pathlib
from typing import Optional, List
import yaml
from charmcraft import __version__, config, linters
logger = logging.getLogger(__name__)
def create_manifest(
basedir: pathlib.Path,
started_at: datetime.datetime,
bases_config: Optional[config.BasesConfiguration],
linting_results: List[linters.CheckResult],
):
"""Create manifest.yaml in basedir for given base configuration.
For packing bundles, `bases` will be skipped when bases_config is None.
Charms should always include a valid bases_config.
:param basedir: Directory to create Charm in.
:param started_at: Build start time.
:param bases_config: Relevant bases configuration, if any.
:returns: Path to created manifest.yaml.
"""
content = {
"charmcraft-version": __version__,
"charmcraft-started-at": started_at.isoformat() + "Z",
}
# Annotate bases only if bases_config is not None.
if bases_config is not None:
bases = [
{
"name": r.name,
"channel": r.channel,
"architectures": r.architectures,
}
for r in bases_config.run_on
]
content["bases"] = bases
# include the linters results (only for attributes)
attributes_info = [
{"name": result.name, "result": result.result}
for result in linting_results
if result.check_type == linters.CheckType.attribute
]
content["analysis"] = {"attributes": attributes_info}
filepath = basedir / "manifest.yaml"
filepath.write_text(yaml.dump(content))
return filepath
| 31.039474 | 75 | 0.693514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,325 | 0.561679 |
6fae14b3638b39a8b273b347bee6855c94403d97 | 5,034 | py | Python | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
]
| 33 | 2018-12-02T12:15:39.000Z | 2021-12-06T14:10:32.000Z | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
]
| 5 | 2019-02-10T20:04:00.000Z | 2021-07-03T12:52:20.000Z | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
]
| 9 | 2018-12-02T12:39:51.000Z | 2020-05-07T08:30:38.000Z | #ecoding:utf-8
import DatasetLoader
import RICNNModel
import tensorflow as tf
import sys
import numpy as np
import regularization as re
import os
import trainLoader
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
TRAIN_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw_100.h5'
TEST_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw.h5'
TRAIN_LABELS = '/media/liuqi/Files/dataset/rotate_100_simple.h5'
TEST_LABELS = '/home/liuqi/Desktop/mnist_rotation_new/mnist_all_rotation_normalized_float_test.amat'
LOADED_SIZE = 28
DESIRED_SIZE = 227
# model constants
NUMBER_OF_CLASSES = 10
NUMBER_OF_FILTERS = 40
NUMBER_OF_FC_FEATURES = 5120
NUMBER_OF_TRANSFORMATIONS = 8
# optimization constants
BATCH_SIZE = 64
TEST_CHUNK_SIZE = 100
ADAM_LEARNING_RATE = 1e-5
PRINTING_INTERVAL = 10
# set seeds
np.random.seed(100)
tf.set_random_seed(100)
x = tf.placeholder(tf.float32, shape=[None,
DESIRED_SIZE,
DESIRED_SIZE,
1,
NUMBER_OF_TRANSFORMATIONS])
y_gt = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASSES])
keep_prob = tf.placeholder(tf.float32)
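# x holds each input image stacked with its transformed copies along the last
# axis (NUMBER_OF_TRANSFORMATIONS appears to be one slice per rotation);
# keep_prob is the dropout keep probability.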
logits, raw_feature, regularization_loss = RICNNModel.define_model(x,
keep_prob,
NUMBER_OF_CLASSES,
NUMBER_OF_FILTERS,
NUMBER_OF_FC_FEATURES)
with tf.name_scope('loss'):
with tf.name_scope('re_loss'):
re_loss = re.regu_constraint(raw_feature, logits)
    with tf.name_scope('softmax_loss'):
        softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_gt))
    with tf.name_scope('total_loss'):
        total_loss = softmax_loss
train_step = tf.train.AdamOptimizer(ADAM_LEARNING_RATE).minimize(total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
train_data_loader = trainLoader.DataLoader(TRAIN_FILENAME,
TRAIN_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_data_loader = DatasetLoader.DataLoader(TEST_FILENAME,
TEST_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_size = test_data_loader.all()[1].shape[0]
assert test_size % TEST_CHUNK_SIZE == 0
number_of_test_chunks = test_size // TEST_CHUNK_SIZE
while (True):
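    # Training loop: runs until interrupted; logs losses once per epoch and
    # evaluates on the held-out test chunks every PRINTING_INTERVAL epochs.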
batch = train_data_loader.next_batch(BATCH_SIZE) # next_batch from the loader
txt_name = "accary_ricnn.txt"
    txt_file = open(txt_name, "a+")
if (train_data_loader.is_new_epoch()):
train_accuracy = session.run(accuracy, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
print_loss = session.run(re_loss,feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
        print_loss_1 = session.run(softmax_loss, feed_dict={x: batch[0],
                                                            y_gt: batch[1],
                                                            keep_prob: 1.0})
print(print_loss)
print(print_loss_1)
train_context = "epochs:" + str(train_data_loader.get_completed_epochs()) + '\n'
txt_file.write(train_context)
loss_context = "softmax_loss:" + str(print_loss_1) + '\n'
txt_file.write(loss_context)
txt_file.close()
print("completed_epochs %d, training accuracy %g" %
(train_data_loader.get_completed_epochs(), train_accuracy))
sys.stdout.flush()
if (train_data_loader.get_completed_epochs() % PRINTING_INTERVAL == 0):
sum = 0.0
xt_name = "accary_ricnn.txt"
txt_file = file(txt_name, "a+")
            for chunk_index in range(number_of_test_chunks):
chunk = test_data_loader.next_batch(TEST_CHUNK_SIZE)
sum += session.run(accuracy, feed_dict={x : chunk[0],
y_gt : chunk[1],
keep_prob : 1.0})
test_accuracy = sum / number_of_test_chunks
new_context = "testing accuracy: " + str(test_accuracy) + '\n'
txt_file.write(new_context)
txt_file.close()
print("testing accuracy %g" % test_accuracy)
sys.stdout.flush()
session.run(train_step, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 0.5})
| 43.025641 | 106 | 0.585419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.112634 |
6fae2120fe92d529be7069156c9a883d0d04d5b4 | 820 | py | Python | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
]
| null | null | null | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
]
| null | null | null | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
"""Extract radial, sulcal, and gyral orientations from gyral coordinate NIFTI file"""
def main():
import argparse
parser = argparse.ArgumentParser("Extract radial, sulcal, and gyral dyads from a coord NIFTI file")
parser.add_argument('coord', help='name of the coord file')
parser.add_argument('-b', '--base', help='Basename of the output files')
parser.add_argument('-r', '--radial', help='Filename for the radial output (overrides the --base option)')
parser.add_argument('-s', '--sulcal', help='Filename for the sulcal output (overrides the --base option)')
parser.add_argument('-g', '--gyral', help='Filename for the gyral output (overrides the --base option)')
args = parser.parse_args()
from mcot.core.surface import utils
utils.gcoord_split(args)
| 45.555556 | 110 | 0.704878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.573171 |
6faf92769295dcd3722767bcf2f74a8c180b6d9e | 3,558 | py | Python | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
]
| 10 | 2018-09-13T15:50:43.000Z | 2021-01-30T19:55:36.000Z | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
]
| 1 | 2019-09-09T14:15:34.000Z | 2019-09-09T14:15:34.000Z | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
]
| 9 | 2018-07-24T09:32:55.000Z | 2021-01-22T21:23:06.000Z | import binascii
from pyshark.packet.common import Pickleable, SlotsPickleable
class LayerField(SlotsPickleable):
"""
Holds all data about a field of a layer, both its actual value and its name and nice representation.
"""
# Note: We use this object with slots and not just a dict because
# it's much more memory-efficient (cuts about a third of the memory).
__slots__ = ['name', 'showname', 'raw_value', 'show', 'hide', 'pos', 'size', 'unmaskedvalue']
def __init__(self, name=None, showname=None, value=None, show=None, hide=None, pos=None, size=None, unmaskedvalue=None):
self.name = name
self.showname = showname
self.raw_value = value
self.show = show
self.pos = pos
self.size = size
self.unmaskedvalue = unmaskedvalue
if hide and hide == 'yes':
self.hide = True
else:
self.hide = False
def __repr__(self):
return '<LayerField %s: %s>' % (self.name, self.get_default_value())
def get_default_value(self):
"""
Gets the best 'value' string this field has.
"""
val = self.show
if not val:
val = self.raw_value
if not val:
val = self.showname
return val
@property
def showname_value(self):
"""
For fields which do not contain a normal value, we attempt to take their value from the showname.
"""
if self.showname and ': ' in self.showname:
return self.showname.split(': ', 1)[1]
@property
def showname_key(self):
if self.showname and ': ' in self.showname:
return self.showname.split(': ', 1)[0]
@property
def binary_value(self):
"""
        Converts this field to binary (assuming its raw value is a hex string)
"""
return binascii.unhexlify(self.raw_value)
@property
def int_value(self):
"""
        Returns the int value of this field (assuming it's an integer).
"""
return int(self.raw_value)
@property
def hex_value(self):
"""
Returns the int value of this field if it's in base 16 (either as a normal number or in
a "0xFFFF"-style hex value)
"""
return int(self.raw_value, 16)
base16_value = hex_value
class LayerFieldsContainer(str, Pickleable):
"""
An object which contains one or more fields (of the same name).
When accessing member, such as showname, raw_value, etc. the appropriate member of the main (first) field saved
in this container will be shown.
"""
def __new__(cls, main_field, *args, **kwargs):
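        # str is immutable, so the displayed value must be chosen here in
        # __new__; __init__ could not change it afterwards.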
value = main_field.get_default_value()
if value is None:
value = ''
obj = str.__new__(cls, value, *args, **kwargs)
obj.fields = [main_field]
return obj
def __dir__(self):
return dir(type(self)) + list(self.__dict__.keys()) + dir(self.main_field)
def add_field(self, field):
self.fields.append(field)
@property
def main_field(self):
return self.fields[0]
@property
def alternate_fields(self):
"""
Return the alternate values of this field containers (non-main ones).
"""
return self.fields[1:]
@property
def all_fields(self):
"""
Returns all fields in a list, the main field followed by the alternate fields.
"""
return self.fields
def __getattr__(self, item):
return getattr(self.main_field, item) | 29.65 | 124 | 0.603991 | 3,474 | 0.976391 | 0 | 0 | 1,407 | 0.395447 | 0 | 0 | 1,302 | 0.365936 |
6faffdb266238fb5626bfa67ef31c69f49889449 | 1,264 | py | Python | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
]
| null | null | null | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
]
| null | null | null | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
]
| null | null | null | import pyautogui
import time
import datetime
class SwipeCard:
def __init__(self):
self.resolution = pyautogui.size()
def resolve_task(self):
try:
hide_card_position = pyautogui.center(
pyautogui.locateOnScreen(f"assets/tasks/swipe_card/main.png",
confidence=0.7))
pyautogui.click(hide_card_position[0], hide_card_position[1])
time.sleep(1)
card_position = pyautogui.center(
pyautogui.locateOnScreen(f"assets/tasks/swipe_card/card.png",
confidence=0.8))
pyautogui.moveTo(card_position[0], card_position[1])
pyautogui.mouseDown(button="left")
mouse_pos_x = card_position[0]
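            # Drag the card to the right in 60 px steps; 1450 appears to be a
            # hard-coded end x-coordinate for the reader at this resolution.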
while (mouse_pos_x < 1450):
pyautogui.moveTo(mouse_pos_x, card_position[1])
mouse_pos_x += 60
pyautogui.click()
return True
except Exception as e:
print(e)
def log(self):
time = datetime.datetime.now()
print(
f"[{time.hour}:{time.minute}][ZADANIE] Rozwiązauje kartę w adminie"
)
def run(self):
return self.resolve_task() | 29.395349 | 79 | 0.557753 | 1,219 | 0.962875 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.114534 |
6fb000a6fd5b519a73bbb7413dd210206c96960d | 370 | py | Python | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
]
| null | null | null | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
]
| null | null | null | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
]
| null | null | null | # https://www.geeksforgeeks.org/write-a-program-to-reverse-an-array-or-string/
# Time: O(n)
# Space: 1
def reverseByMiddles(arr):
n = len(arr)
limit = n//2
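    # Swap symmetric pairs in place; only the first half needs visiting.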
for i in range(limit):
        arr[i], arr[(n-1)-i] = arr[(n-1)-i], arr[i]
return arr
arr = [1,2,3]
result = reverseByMiddles(arr)
print(result)
print(reverseByMiddles(arr = [1,2,3,4]))
| 18.5 | 78 | 0.627027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.27027 |
6fb0bb046f2c82f4fc248ab9913d7e957fa16a08 | 1,397 | py | Python | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
]
| null | null | null | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
]
| 4 | 2018-10-09T17:16:38.000Z | 2021-06-10T20:56:30.000Z | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
]
| null | null | null | from service import Service
from unittest import TestCase
from mock import patch
import sys
class TestService(TestCase):
@patch('service.Service.bad_random', return_value=10)
def test_bad_random(self, bad_random):
self.assertEqual(bad_random(), 10)
@patch('service.Service.bad_random', return_value=10)
def test_divide(self, bad_random):
x = Service()
self.assertEqual(x.divide(2),5)
self.assertEqual(x.divide(-2),-5)
bad_random.return_value=-10
self.assertEqual(x.divide(2),-5)
bad_random.return_value=0
self.assertEqual(x.divide(sys.maxsize),0)
self.assertEqual(x.divide(-sys.maxsize+1),0)
def test_abs_plus(self):
x=Service()
self.assertEqual(x.abs_plus(10),11)
self.assertEqual(x.abs_plus(0),1)
self.assertEqual(x.abs_plus(-10),11)
self.assertEqual(x.abs_plus(-sys.maxsize+1),sys.maxsize)
self.assertEqual(x.abs_plus(10),11)
@patch('service.Service.bad_random', return_value=10)
def test_complicated_function(self, bad_random):
x = Service()
results = x.complicated_function(20)
self.assertEqual(results[0], 0.5)
self.assertEqual(results[1], 0)
bad_random.return_value=-13
results = x.complicated_function(-1)
self.assertEqual(results[0], 13)
self.assertEqual(results[1], 1)
bad_random.return_value=0
results = x.complicated_function(sys.maxsize)
self.assertEqual(results[0], 0)
self.assertEqual(results[1], 0)
| 27.94 | 58 | 0.740873 | 1,301 | 0.931281 | 0 | 0 | 1,013 | 0.725125 | 0 | 0 | 84 | 0.060129 |
6fb2b10f3d4883634b84a549a2f3de0c0993ed24 | 1,512 | py | Python | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
]
| 111 | 2016-05-18T04:18:18.000Z | 2021-11-03T02:05:19.000Z | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
]
| 1,103 | 2016-05-29T05:17:53.000Z | 2022-03-31T18:12:40.000Z | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | TheWug/weasyl | a568a542cc58c11e30621fb672c701531d4306a8 | [
"Apache-2.0"
]
| 47 | 2016-05-29T20:48:37.000Z | 2021-11-12T09:40:40.000Z | """Use TIMESTAMP column for latest submission
Revision ID: eff79a07a88d
Revises: 83e6b2a46191
Create Date: 2017-01-08 22:20:43.814375
"""
# revision identifiers, used by Alembic.
revision = 'eff79a07a88d'
down_revision = '83e6b2a46191'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
import libweasyl
from libweasyl.legacy import UNIXTIME_OFFSET
def upgrade():
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_old',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.ArrowColumn(), nullable=False, server_default='epoch'),
)
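    # Backfill the new TIMESTAMP column from the old integer seconds, which
    # were stored relative to UNIXTIME_OFFSET.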
op.execute(
"UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE 'epoch' + "
"(latest_submission_time_old - %d) * INTERVAL '1 second'" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_old')
def downgrade():
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_new',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.WeasylTimestampColumn(), nullable=False, server_default='0'),
)
op.execute(
"UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_new')
| 30.857143 | 130 | 0.707011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 765 | 0.505952 |
6fb43ce461d11d32148db76a6319e68f78bca70e | 3,499 | py | Python | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
]
| 1 | 2019-04-15T04:39:13.000Z | 2019-04-15T04:39:13.000Z | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
]
| null | null | null | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
]
| null | null | null | import pandas as pd
import numpy as np
import os
# Function, divided all data into groups by time period, like [1AM-3AM; 3AM-5Am ...]
def binning(column, points, labels=None, month=0, stop=0):
'''
Notes: The Row Data from MBTA webiste
The Time format is from 3:00 to 27:00, means 3:00 AM today to next day 3:00 AM
And in the csv file, it use int to replace date format, like 300 means 3:00 AM; 1500 means 3:00 PM
:param column: use which column to divide, here we use TIME_PERIOD column
:param points: the break points we use to divide
:param labels: the labels for result groups that have been divided
:param month: used to record error
:param stop: used to record error
'''
# Get max time and min time from data
minval = column.min()
maxval = column.max()
# Handle break points and labels errors and print
while maxval <= points[len(points)-1]:
        print('Month: ' + str(month) + ' Stop: ' + str(stop))
del points[len(points)-1]
del labels[len(points)-1]
while minval >= points[0]:
        print('Month: ' + str(month) + ' Stop: ' + str(stop))
del points[0]
del labels[0]
# The full break points includes min, max time
break_points = [minval] + points + [maxval]
# If user doesn't provide labels, using int number to replace, here I have provided labels, so it doesn't work
if not labels:
labels = range(len(points)+1)
# cut() function to divide data into groups and return them
columnbin = pd.cut(column, bins=break_points, labels=labels, include_lowest=True)
return columnbin
# Function, make directory. if exist, do nothing
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
# Using Pandas read every months' row data, from January to July, there only 7 months provide by MBTA this year until now
for month in range(1,8):
csvfile = pd.read_csv('/Users/Eddy/Desktop/Python_MBTA/MBTA_Raw_Entry_Data/2018_0' + str(month) + '.csv')
# Format file to prepare data analysis
df = pd.DataFrame(csvfile)
# Divide data into different part group by stop id
grouped = df.groupby('GTFS_STOP_ID', as_index=False)
# For every stop's data, using binning() function to divide into different time period
for stop, group in grouped:
# Define break points
points = [500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300, 2500]
# Define labels
labels = ['3AM-5AM', '5AM-7AM', '7AM-9AM', '9AM-11AM', '11AM-1PM', '1PM-3PM', '3PM-5PM', '5PM-7PM', '7PM-9PM',
'9PM-11PM', '11PM-1AM', '1AM-3AM']
# Create new column [TIME_PERIOD_Bin] for the result returned by binning() function
group['TIME_PERIOD_Bin'] = binning(group['TIME_PERIOD'], points, labels, month, stop)
# Format all the data again
df_station = pd.DataFrame(group)
# Until now, all data have been grouped by stop_id, and then grouped by time period that we create
group_time = df_station.groupby('TIME_PERIOD_Bin')
# Make directory to store new csv files
mkdir('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month))
# Calculate the sum of entry people number for every stops and every periods
data1 = pd.DataFrame(group_time['STATION_ENTRIES'].agg(np.sum))
# Write into the csv files
data1.to_csv('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month) + "/" + stop + '.csv') | 38.877778 | 121 | 0.656759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,027 | 0.579308 |
6fb582e0d7d4fa8f242a226b9d6f0299b62ae75d | 1,574 | py | Python | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
]
| null | null | null | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
]
| null | null | null | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
]
| null | null | null | from utils import default_args
from datetime import timedelta
from airflow import DAG
from airflow_kubernetes_job_operator import (
KubernetesJobOperator,
JobRunnerDeletePolicy,
KubernetesLegacyJobOperator,
)
dag = DAG(
"kub-job-op-test-jinja",
default_args=default_args,
description="Test base job operator",
schedule_interval=None,
catchup=False,
user_defined_macros={
"test_macro": lambda a: f"my {a}",
"default_image": "ubuntu",
},
)
namespace = None
envs = {
"TIC_COUNT": 3,
"PASS_ARG": "a test",
"JINJA_ENV": "{{ ds }}",
}
default_delete_policy = JobRunnerDeletePolicy.Never
KubernetesJobOperator(
task_id="test-job-success",
namespace=namespace,
image="{{default_image}}",
body_filepath="./templates/test_job.success.jinja.yaml",
envs=envs,
dag=dag,
delete_policy=default_delete_policy,
jinja_job_args={"test": "lama"},
)
# bash_script = """
# #/usr/bin/env bash
# echo "Legacy start for taskid {{ti.task_id}} {{job.test}}"
# cur_count=0
# while true; do
# cur_count=$((cur_count + 1))
# if [ "$cur_count" -ge "$TIC_COUNT" ]; then
# break
# fi
# date
# sleep 1
# done
# echo "Complete"
# """
# KubernetesLegacyJobOperator(
# task_id="legacy-test-job-success",
# image="{{default_image}}",
# cmds=["bash", "-c", bash_script],
# dag=dag,
# is_delete_operator_pod=True,
# env_vars=envs,
# delete_policy=default_delete_policy,
# )
if __name__ == "__main__":
dag.clear(reset_dag_runs=True)
dag.run()
| 22.169014 | 60 | 0.653113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.494917 |
6fb591a1133b9da158581aa46d45eeabf6264943 | 6,458 | py | Python | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
]
| 7 | 2022-02-18T03:14:43.000Z | 2022-03-16T08:38:55.000Z | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
]
| 6 | 2022-03-10T12:16:59.000Z | 2022-03-18T01:19:00.000Z | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
]
| 3 | 2022-02-17T08:40:33.000Z | 2022-03-29T01:38:46.000Z | import json
import jsonpath
import paramiko
from spacecapsule.history import store_experiment, rollback_command
from subprocess import Popen, PIPE
from spacecapsule.k8s import prepare_api, copy_tar_file_to_namespaced_pod, executor_command_inside_namespaced_pod
from spacecapsule.template import chaosblade_prepare_script, resource_path, chaosblade_inject, chaosblade_prepare, \
chaosblade_jvm_delay, chaosblade_prepare_script_vm
def bash_executor(create_script, create_template, create_rollback_args, rollback_template_file, args):
    # TODO: some of these parameters should be selectable by the executor
script = create_script(create_template, args)
process = Popen(script, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
args.update(create_rollback_args(args))
store_experiment(args, rollback_command(rollback_template_file, args), out.decode(), err.decode())
def inject_code(namespace, pod, process_name, pid, classname, methodname, kube_config, script_file, script_name,
experiment_name):
args = locals()
agent_uid, api_instance, stderr = chaosblade_jvm_prepare(args, kube_config, namespace, pod)
print("Prepare finished, start to inject!")
# Ask k8s_executor to inject target code
inject_command = chaosblade_prepare_script(chaosblade_inject, args)
inject_msg, stderr = executor_command_inside_namespaced_pod(api_instance, namespace, pod, inject_command)
if stderr is not None:
print(stderr)
experiment_uid = jsonpath.jsonpath(json.loads(inject_msg), 'result')
# Save the UID which blade create
args.update(agent_uid=agent_uid, experiment_uid=experiment_uid[0])
args.update(desc=args)
store_experiment(args, rollback_command('chaosbladeJvm-rollback.sh', args), inject_msg, stderr)
def delay_code(namespace, pod, process, pid, classname, methodname, time, offset, kube_config, experiment_name):
args = locals()
agent_uid, api_instance, stderr = chaosblade_jvm_prepare(args, kube_config, namespace, pod)
    delay_command = chaosblade_prepare_script(chaosblade_jvm_delay, args)
delay_msg, delay_err = executor_command_inside_namespaced_pod(api_instance, namespace, pod, delay_command)
experiment_uid = jsonpath.jsonpath(json.loads(delay_msg), 'result')
# Save the UID which blade create
args.update(agent_uid=agent_uid, experiment_uid=experiment_uid[0])
args.update(desc=args)
store_experiment(args, rollback_command('chaosbladeJvm-rollback.sh', args), "Success", stderr)
def chaosblade_jvm_prepare(args, kube_config, namespace, pod):
api_instance = prepare_api(kube_config)
check_result, _ = check_chaosblade_exists(api_instance, namespace, pod)
print('Check result', check_result)
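    # check_chaosblade_exists echoes the literal strings "True"/"False" from
    # inside the pod, hence the string comparison below.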
if check_result == 'False':
print('Copy file')
copy_tar_file_to_namespaced_pod(api_instance, namespace, pod, resource_path('./resources/chaosblade-exec'),
'/opt/chaosblade')
copy_tar_file_to_namespaced_pod(api_instance, namespace, pod, resource_path('./resources/chaosblade-jvm'),
'/opt/chaosblade')
copy_tar_file_to_namespaced_pod(api_instance, namespace, pod, resource_path('./resources/chaosblade-module'),
'/opt/chaosblade')
out, err = executor_command_inside_namespaced_pod(api_instance, namespace, pod, [
"bash", "-c",
"chmod -R 755 /opt/chaosblade"
])
else:
print('Chaosblade Exist')
print('Copy file finished')
prepare_args = {'process': 'java'}
prepare_command = chaosblade_prepare_script(chaosblade_prepare, prepare_args)
prepare_msg, stderr = executor_command_inside_namespaced_pod(api_instance, namespace, pod, prepare_command)
print(prepare_msg, stderr)
agent_uid = jsonpath.jsonpath(json.loads(prepare_msg), 'result')
return agent_uid[0], api_instance, stderr
def check_chaosblade_exists(api_instance, namespace, pod):
commands = ["bash",
"-c",
"[ -d /opt/chaosblade ] && echo True || echo False"]
check_msg, check_err = executor_command_inside_namespaced_pod(api_instance, namespace, pod, commands)
return check_msg, check_err
def ssh_executor(ip, user, pwd, command):
ssh = paramiko.SSHClient()
key = paramiko.AutoAddPolicy()
ssh.set_missing_host_key_policy(key)
ssh.connect(ip, 22, user, pwd, timeout=5)
return ssh.exec_command(command)
def chaosblade_ssh_executor(ip, user, pwd, command, experiment_name):
args = locals()
ssh = paramiko.SSHClient()
key = paramiko.AutoAddPolicy()
ssh.set_missing_host_key_policy(key)
ssh.connect(ip, 22, user, pwd, timeout=5)
stdin, stdout, stderr = ssh_executor(ip, user, pwd, command)
exec_msg = stdout.readline().replace('\n', '')
experiment_uid = jsonpath.jsonpath(json.loads(exec_msg), 'result')
args['rollback_command'] = '/opt/chaosblade/blade destroy ' + experiment_uid[0]
store_experiment(args, rollback_command('chaosblade-ssh-rollback.sh', args), exec_msg, stderr.read().decode())
def chaosblade_ssh_jvm_executor(ip, user, pwd, process_name, pid, classname, methodname, script_file,
script_name,
experiment_name):
args = locals()
ssh = paramiko.SSHClient()
key = paramiko.AutoAddPolicy()
ssh.set_missing_host_key_policy(key)
ssh.connect(ip, 22, user, pwd, timeout=5)
prepare_args = {'pid': pid}
prepare_command = chaosblade_prepare_script_vm(chaosblade_prepare, prepare_args)
stdin, stdout, stderr = ssh_executor(ip, user, pwd, prepare_command)
prepare_msg = stdout.readline().replace('\n', '')
print(prepare_command)
print(prepare_msg, stderr.readlines())
agent_uid = jsonpath.jsonpath(json.loads(prepare_msg), 'result')
inject_command = chaosblade_prepare_script_vm(chaosblade_inject, args)
stdin, stdout, stderr = ssh_executor(ip, user, pwd, inject_command)
inject_msg = stdout.readline().replace('\n', '')
experiment_uid = jsonpath.jsonpath(json.loads(inject_msg), 'result')
print('exe', experiment_uid)
print('agent', agent_uid)
# Save the UID which blade create
args.update(agent_uid=agent_uid[0], experiment_uid=experiment_uid[0])
args.update()
store_experiment(args, rollback_command('chaosblade-ssh-jvm-rollback.sh', args), inject_msg, stderr.read().decode())
| 48.19403 | 120 | 0.722979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 795 | 0.122799 |
6fb7c5d5fb34e77d36095b18c408e6edbe184b8f | 2,952 | py | Python | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
]
| 15 | 2020-05-29T11:42:14.000Z | 2022-03-20T03:53:44.000Z | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
]
| null | null | null | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
]
| 11 | 2020-06-09T01:16:22.000Z | 2021-04-27T08:53:02.000Z | """POD-NN modeling for 1D, unsteady Burger Equation."""
#%% Imports
import sys
import os
import pickle
import numpy as np
sys.path.append(os.path.join("..", ".."))
from poduqnn.podnnmodel import PodnnModel
from poduqnn.mesh import read_multi_space_sol_input_mesh
from poduqnn.handling import clean_dir, split_dataset
from hyperparams import HP as hp
resdir = "cache"
clean_dir(resdir)
# Getting data from the files
# fake_x = np.zeros(hp["n_s"] + hp["n_s_tst"])
# test_size = hp["n_s_tst"] / (hp["n_s"] + hp["n_s_tst"])
# train_tst_idx = split_dataset(fake_x, fake_x, test_size, idx_only=True)
train_tst_idx = ([129, 13, 161, 10, 3, 4, 68, 19, 108, 63, 62, 147, 117, 113, 165, 80, 124, 33, 41, 37, 79, 184, 154, 83, 102, 190, 195, 148, 46, 114, 16, 155, 121, 104, 120, 58, 53, 78, 160, 193, 126, 115, 95, 127, 166, 131, 49, 100, 84, 35, 12, 27, 118, 167, 66, 56, 106, 175, 143, 97, 87, 1, 183, 111, 36, 158, 153, 199, 17, 31, 177, 194, 182, 59, 187, 130, 163, 92, 48, 96, 82, 6, 123, 98, 192, 43, 26, 181, 170, 134, 72, 50, 24, 174, 122, 103, 71, 138, 110, 7, 65, 51, 28, 173, 172, 34, 90, 119, 185, 15, 186, 101, 85, 60, 75, 39, 38, 5, 141, 89, 57, 144, 64, 67, 171, 157, 94, 70, 142, 54, 74, 146, 191, 112, 107, 189, 30, 32, 133, 169, 151, 23, 21, 99, 2, 22, 116, 91, 145, 178, 137, 135, 40, 73, 47, 52, 25, 93, 128, 88, 109, 44, 29, 198, 159, 125, 11, 45, 197, 149, 69, 188, 164, 0, 18, 176, 9, 168, 77, 132], [76, 42, 179, 61, 105, 136, 86, 196, 8, 14, 139, 20, 150, 152, 180, 162, 140, 81, 55, 156])
with open(os.path.join("cache", "train_tst_idx.pkl"), "wb") as f:
pickle.dump(train_tst_idx, f)
datadir = "data"
mu_path = os.path.join(datadir, "INPUT_MONTE_CARLO.dat")
# x_mesh, connectivity, X_v, U = \
# read_multi_space_sol_input_mesh(hp["n_s"], 1, 1, train_tst_idx[0],
# hp["mesh_idx"], datadir, mu_path,
# hp["mu_idx"])
# np.save(os.path.join("cache", "x_mesh.npy"), x_mesh)
# np.save(os.path.join("cache", "connectivity.npy"), connectivity)
# np.save(os.path.join("cache", "X_v.npy"), X_v)
# np.save(os.path.join("cache", "U.npy"), U)
x_mesh = np.load(os.path.join("cache", "x_mesh.npy"))
connectivity = np.load(os.path.join("cache", "connectivity.npy"))
X_v = np.load(os.path.join("cache", "X_v.npy"))
U = np.load(os.path.join("cache", "U.npy"))
# x_mesh = np.load(os.path.join("cache", "x_mesh.npy"))
# connectivity = np.load(os.path.join("cache", "connectivity.npy"))
# X_v = np.load(os.path.join("cache", "X_v.npy"))
# U = np.load(os.path.join("cache", "U.npy"))
#%% Init the model
model = PodnnModel(resdir, hp["n_v"], x_mesh, hp["n_t"])
#%% Generate the dataset from the mesh and params
X_v_train, v_train, \
X_v_val, v_val, \
U_val = model.convert_multigpu_data(U, X_v, hp["train_val"], hp["eps"])
model.initVNNs(hp["n_M"], hp["h_layers"], hp["lr"], hp["lambda"],
hp["adv_eps"], hp["soft_0"], hp["norm"])
| 50.033898 | 910 | 0.618225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,230 | 0.416667 |
6fb84b781b83af75ecf875a208c9ad2d89873dec | 13,981 | py | Python | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | coordinates_01EE00 = ((121, 126),
(121, 132), (121, 134), (121, 135), (121, 137), (121, 138), (121, 139), (121, 140), (121, 141), (122, 114), (122, 115), (122, 116), (122, 117), (122, 119), (122, 125), (122, 127), (122, 128), (122, 142), (123, 110), (123, 111), (123, 112), (123, 113), (123, 120), (123, 124), (123, 126), (123, 130), (123, 132), (123, 133), (123, 134), (123, 135), (123, 136), (123, 137), (123, 138), (123, 139), (123, 140), (123, 141), (123, 144), (124, 107), (124, 109), (124, 114), (124, 115), (124, 116), (124, 117), (124, 118), (124, 120), (124, 124), (124, 125), (124, 126), (124, 127), (124, 128), (124, 129), (124, 131), (124, 132), (124, 133), (124, 134), (124, 135), (124, 136), (124, 137), (124, 138), (124, 139), (124, 140), (124, 141), (124, 142), (124, 146), (125, 105), (125, 110), (125, 111), (125, 112), (125, 113), (125, 114), (125, 115), (125, 116), (125, 117),
(125, 118), (125, 119), (125, 120), (125, 122), (125, 124), (125, 125), (125, 126), (125, 127), (125, 128), (125, 129), (125, 130), (125, 131), (125, 132), (125, 133), (125, 134), (125, 135), (125, 136), (125, 137), (125, 138), (125, 139), (125, 140), (125, 141), (125, 142), (125, 143), (125, 144), (125, 148), (126, 103), (126, 107), (126, 108), (126, 109), (126, 110), (126, 111), (126, 112), (126, 113), (126, 114), (126, 115), (126, 116), (126, 117), (126, 118), (126, 119), (126, 120), (126, 124), (126, 125), (126, 126), (126, 127), (126, 128), (126, 129), (126, 130), (126, 131), (126, 132), (126, 133), (126, 134), (126, 135), (126, 136), (126, 137), (126, 138), (126, 139), (126, 140), (126, 141), (126, 142), (126, 143), (126, 144), (126, 145), (126, 146), (126, 149), (127, 102), (127, 105), (127, 106), (127, 107), (127, 108), (127, 109), (127, 110),
(127, 111), (127, 112), (127, 113), (127, 114), (127, 115), (127, 116), (127, 117), (127, 118), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 124), (127, 125), (127, 126), (127, 127), (127, 128), (127, 129), (127, 130), (127, 131), (127, 132), (127, 133), (127, 134), (127, 135), (127, 136), (127, 137), (127, 138), (127, 139), (127, 140), (127, 141), (127, 142), (127, 143), (127, 144), (127, 145), (127, 146), (127, 147), (127, 148), (127, 150), (128, 103), (128, 105), (128, 106), (128, 107), (128, 108), (128, 109), (128, 110), (128, 111), (128, 112), (128, 113), (128, 114), (128, 115), (128, 116), (128, 117), (128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 125), (128, 126), (128, 127), (128, 128), (128, 129), (128, 130), (128, 131), (128, 132), (128, 133), (128, 134), (128, 135), (128, 136),
(128, 137), (128, 138), (128, 139), (128, 140), (128, 141), (128, 142), (128, 143), (128, 144), (128, 145), (128, 146), (128, 147), (128, 149), (129, 103), (129, 105), (129, 106), (129, 107), (129, 108), (129, 109), (129, 110), (129, 111), (129, 112), (129, 113), (129, 114), (129, 115), (129, 116), (129, 117), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 126), (129, 127), (129, 128), (129, 129), (129, 130), (129, 131), (129, 132), (129, 133), (129, 134), (129, 135), (129, 136), (129, 137), (129, 138), (129, 139), (129, 140), (129, 141), (129, 142), (129, 143), (129, 144), (129, 145), (129, 146), (129, 147), (129, 149), (130, 104), (130, 107), (130, 108), (130, 109), (130, 110), (130, 111), (130, 112), (130, 113), (130, 114), (130, 115), (130, 116), (130, 117), (130, 118), (130, 119), (130, 120),
(130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 126), (130, 127), (130, 128), (130, 129), (130, 130), (130, 131), (130, 132), (130, 133), (130, 134), (130, 135), (130, 136), (130, 137), (130, 138), (130, 139), (130, 140), (130, 141), (130, 142), (130, 143), (130, 144), (130, 145), (130, 146), (130, 148), (131, 104), (131, 106), (131, 107), (131, 108), (131, 109), (131, 110), (131, 111), (131, 112), (131, 113), (131, 114), (131, 115), (131, 116), (131, 117), (131, 118), (131, 119), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 125), (131, 126), (131, 127), (131, 128), (131, 129), (131, 130), (131, 131), (131, 132), (131, 133), (131, 134), (131, 135), (131, 136), (131, 137), (131, 138), (131, 139), (131, 140), (131, 141), (131, 142), (131, 143), (131, 144), (131, 145), (131, 147), (132, 108), (132, 109), (132, 110),
(132, 111), (132, 112), (132, 113), (132, 114), (132, 115), (132, 116), (132, 117), (132, 118), (132, 119), (132, 120), (132, 121), (132, 122), (132, 123), (132, 124), (132, 125), (132, 126), (132, 127), (132, 128), (132, 129), (132, 130), (132, 131), (132, 132), (132, 133), (132, 134), (132, 135), (132, 136), (132, 137), (132, 138), (132, 139), (132, 140), (132, 141), (132, 142), (132, 143), (132, 144), (132, 146), (133, 108), (133, 110), (133, 111), (133, 112), (133, 113), (133, 114), (133, 115), (133, 116), (133, 117), (133, 118), (133, 119), (133, 120), (133, 121), (133, 122), (133, 123), (133, 124), (133, 125), (133, 126), (133, 127), (133, 128), (133, 129), (133, 130), (133, 131), (133, 132), (133, 133), (133, 134), (133, 135), (133, 136), (133, 137), (133, 138), (133, 139), (133, 140), (133, 141), (133, 142), (133, 143), (133, 144), (133, 146),
(134, 107), (134, 108), (134, 109), (134, 110), (134, 111), (134, 112), (134, 113), (134, 114), (134, 115), (134, 116), (134, 117), (134, 118), (134, 119), (134, 120), (134, 121), (134, 122), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 130), (134, 131), (134, 132), (134, 133), (134, 134), (134, 135), (134, 136), (134, 137), (134, 138), (134, 139), (134, 140), (134, 141), (134, 142), (134, 143), (134, 145), (135, 106), (135, 108), (135, 109), (135, 110), (135, 111), (135, 112), (135, 113), (135, 114), (135, 115), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126), (135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 132), (135, 133), (135, 134), (135, 135), (135, 136), (135, 137), (135, 138), (135, 139), (135, 140),
(135, 141), (135, 142), (135, 143), (135, 145), (136, 105), (136, 107), (136, 110), (136, 111), (136, 112), (136, 113), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 132), (136, 133), (136, 134), (136, 135), (136, 136), (136, 137), (136, 138), (136, 139), (136, 140), (136, 141), (136, 142), (136, 144), (137, 105), (137, 109), (137, 110), (137, 111), (137, 112), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 126), (137, 127), (137, 128), (137, 129), (137, 130), (137, 131), (137, 132), (137, 133), (137, 134), (137, 135), (137, 136), (137, 137), (137, 138), (137, 139),
(137, 140), (137, 141), (137, 142), (137, 144), (138, 105), (138, 107), (138, 110), (138, 112), (138, 113), (138, 114), (138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 120), (138, 121), (138, 122), (138, 123), (138, 124), (138, 125), (138, 126), (138, 127), (138, 128), (138, 129), (138, 130), (138, 131), (138, 132), (138, 133), (138, 134), (138, 135), (138, 136), (138, 137), (138, 138), (138, 139), (138, 140), (138, 141), (138, 143), (139, 106), (139, 110), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 119), (139, 120), (139, 121), (139, 122), (139, 123), (139, 124), (139, 125), (139, 126), (139, 127), (139, 128), (139, 129), (139, 130), (139, 131), (139, 132), (139, 133), (139, 134), (139, 135), (139, 136), (139, 137), (139, 138), (139, 139), (139, 140), (139, 142), (140, 110), (140, 112),
(140, 113), (140, 114), (140, 115), (140, 116), (140, 117), (140, 118), (140, 119), (140, 120), (140, 121), (140, 122), (140, 123), (140, 124), (140, 125), (140, 126), (140, 127), (140, 128), (140, 129), (140, 130), (140, 131), (140, 132), (140, 133), (140, 134), (140, 135), (140, 138), (140, 139), (140, 141), (141, 110), (141, 115), (141, 116), (141, 117), (141, 118), (141, 119), (141, 120), (141, 121), (141, 122), (141, 123), (141, 124), (141, 125), (141, 126), (141, 127), (141, 128), (141, 129), (141, 130), (141, 131), (141, 132), (141, 133), (141, 134), (141, 136), (141, 137), (141, 138), (141, 139), (141, 141), (142, 110), (142, 112), (142, 113), (142, 114), (142, 115), (142, 116), (142, 117), (142, 118), (142, 119), (142, 120), (142, 121), (142, 122), (142, 123), (142, 124), (142, 125), (142, 126), (142, 127), (142, 128), (142, 129), (142, 130),
(142, 131), (142, 132), (142, 133), (142, 135), (142, 138), (142, 141), (143, 115), (143, 117), (143, 118), (143, 119), (143, 120), (143, 121), (143, 122), (143, 123), (143, 124), (143, 125), (143, 126), (143, 127), (143, 128), (143, 129), (143, 130), (143, 131), (143, 132), (143, 134), (143, 138), (143, 140), (144, 115), (144, 117), (144, 118), (144, 119), (144, 120), (144, 121), (144, 122), (144, 123), (144, 124), (144, 125), (144, 126), (144, 127), (144, 128), (144, 129), (144, 130), (144, 131), (144, 138), (145, 115), (145, 117), (145, 118), (145, 119), (145, 120), (145, 121), (145, 122), (145, 123), (145, 124), (145, 125), (145, 126), (145, 127), (145, 128), (145, 129), (145, 133), (145, 138), (145, 139), (146, 116), (146, 119), (146, 120), (146, 121), (146, 122), (146, 123), (146, 124), (146, 125), (146, 126), (146, 127), (146, 128), (146, 138),
(147, 117), (147, 119), (147, 120), (147, 121), (147, 122), (147, 123), (147, 124), (147, 125), (147, 126), (147, 127), (147, 129), (148, 119), (148, 121), (148, 122), (148, 123), (148, 124), (148, 125), (148, 126), (148, 128), (149, 119), (149, 121), (149, 122), (149, 123), (149, 128), (150, 118), (150, 120), (150, 121), (150, 125), (150, 126), (150, 129), (151, 118), (151, 123), (151, 127), (151, 128), (151, 130), (151, 132), (152, 120), (152, 121), (152, 128), (152, 133), )
coordinates_00EE00 = ((98, 135),
(99, 121), (99, 122), (99, 135), (99, 136), (100, 120), (100, 122), (100, 135), (101, 114), (101, 120), (101, 123), (101, 129), (101, 135), (101, 137), (102, 114), (102, 119), (102, 121), (102, 123), (102, 128), (102, 130), (102, 136), (103, 114), (103, 119), (103, 121), (103, 122), (103, 123), (103, 125), (103, 126), (103, 130), (104, 112), (104, 115), (104, 118), (104, 120), (104, 123), (104, 128), (104, 130), (105, 111), (105, 114), (105, 116), (105, 117), (105, 119), (105, 120), (105, 121), (105, 122), (105, 123), (105, 124), (105, 125), (105, 126), (105, 127), (105, 128), (105, 129), (105, 130), (105, 132), (106, 111), (106, 113), (106, 114), (106, 115), (106, 118), (106, 120), (106, 123), (106, 125), (106, 126), (106, 127), (106, 128), (106, 129), (106, 130), (106, 134), (107, 111), (107, 113), (107, 114), (107, 115), (107, 116), (107, 117),
(107, 118), (107, 120), (107, 123), (107, 124), (107, 125), (107, 126), (107, 127), (107, 128), (107, 129), (107, 130), (107, 131), (107, 132), (107, 134), (108, 111), (108, 113), (108, 114), (108, 115), (108, 116), (108, 117), (108, 118), (108, 119), (108, 120), (108, 123), (108, 126), (108, 127), (108, 128), (108, 129), (108, 130), (108, 131), (108, 132), (108, 133), (108, 135), (109, 111), (109, 115), (109, 116), (109, 117), (109, 118), (109, 119), (109, 120), (109, 121), (109, 123), (109, 124), (109, 125), (109, 128), (109, 129), (109, 130), (109, 131), (109, 132), (109, 133), (109, 134), (109, 136), (110, 110), (110, 112), (110, 113), (110, 114), (110, 117), (110, 118), (110, 119), (110, 120), (110, 121), (110, 123), (110, 126), (110, 127), (110, 128), (110, 129), (110, 130), (110, 131), (110, 132), (110, 133), (110, 134), (110, 135), (110, 138),
(111, 109), (111, 111), (111, 115), (111, 117), (111, 118), (111, 119), (111, 120), (111, 122), (111, 128), (111, 130), (111, 131), (111, 132), (111, 133), (111, 134), (111, 135), (111, 136), (111, 140), (112, 107), (112, 111), (112, 117), (112, 119), (112, 120), (112, 121), (112, 123), (112, 128), (112, 130), (112, 131), (112, 132), (112, 133), (112, 134), (112, 135), (112, 136), (112, 137), (112, 138), (113, 105), (113, 109), (113, 110), (113, 111), (113, 113), (113, 118), (113, 120), (113, 121), (113, 122), (113, 123), (113, 124), (113, 125), (113, 126), (113, 127), (113, 130), (113, 131), (113, 132), (113, 133), (113, 134), (113, 135), (113, 136), (113, 137), (113, 138), (113, 139), (113, 141), (114, 105), (114, 107), (114, 108), (114, 109), (114, 110), (114, 111), (114, 114), (114, 118), (114, 119), (114, 120), (114, 121), (114, 122), (114, 123),
(114, 128), (114, 129), (114, 131), (114, 132), (114, 133), (114, 134), (114, 135), (114, 136), (114, 137), (114, 138), (114, 139), (114, 141), (115, 105), (115, 107), (115, 108), (115, 109), (115, 110), (115, 111), (115, 112), (115, 113), (115, 116), (115, 117), (115, 118), (115, 119), (115, 120), (115, 121), (115, 122), (115, 123), (115, 124), (115, 127), (115, 130), (115, 132), (115, 133), (115, 134), (115, 135), (115, 136), (115, 137), (115, 138), (115, 139), (115, 141), (116, 105), (116, 126), (116, 131), (116, 133), (116, 134), (116, 135), (116, 136), (116, 137), (116, 138), (116, 139), (116, 141), (117, 106), (117, 108), (117, 109), (117, 110), (117, 111), (117, 112), (117, 113), (117, 114), (117, 115), (117, 116), (117, 117), (117, 118), (117, 119), (117, 120), (117, 121), (117, 122), (117, 124), (117, 131), (117, 140), (118, 132), (118, 134),
(118, 135), (118, 136), (118, 138), (118, 140), (119, 132), (119, 135), (119, 139), (119, 140), )
coordinates_E0E1E1 = ((126, 127),
(126, 134), (127, 118), (127, 126), (127, 134), (128, 118), (128, 125), (128, 128), (129, 119), (129, 128), (129, 129), (130, 123), (130, 128), (130, 130), (131, 122), (131, 128), (131, 129), (132, 122), (132, 128), (134, 122), (136, 121), (137, 121), )
coordinates_E1E1E1 = ((111, 125),
(112, 114), )
| 582.541667 | 865 | 0.500036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6fb8b1b58b2f38e1b4575b537fc9d0698a19d441 | 565 | py | Python | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
]
| 1 | 2018-10-20T14:45:31.000Z | 2018-10-20T14:45:31.000Z | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
]
| null | null | null | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
]
| null | null | null | cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print("There are", cars, "cars available")
print("There are only", drivers, "drivers available")
print("There will be", cars_not_driven, "empty cars today")
print("We can transport", carpool_carpacity, "people today")
print("We have", passengers, "to carpool today")
print("We need to put about", average_passengers_per_car, "people in each car") | 37.666667 | 79 | 0.771681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.346903 |
6fb8b2f2709c0ffde62b6fe00c0e82cc6d0aeb7e | 6,599 | py | Python | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
]
| 4 | 2019-04-10T17:57:29.000Z | 2019-04-17T16:13:49.000Z | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
]
| 4 | 2019-04-12T08:26:24.000Z | 2019-04-20T06:21:09.000Z | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
]
| 1 | 2021-04-04T10:48:14.000Z | 2021-04-04T10:48:14.000Z | import json
import logging
import os
import click
import numpy as np
from keras import backend as K
from keras.models import load_model as keras_load
from sklearn.base import clone as sklearn_clone
from sklearn.externals import joblib
from faculty_xval.utilities import keras_clone_and_compile
LOGGER = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
def load_model(path, model_type):
"""
Load the model using the method appropriate for its type ("keras" or other).
Parameters
----------
path: String
File path to look for the model.
model_type: String
String specifying the type of model to be loaded. Anything other than
"keras" will be loaded using joblib.
"""
if model_type == "keras":
# Load Keras model.
LOGGER.info("Loading Keras model")
model = keras_load(path)
LOGGER.info("Model loading complete")
else:
# Load model of other type.
LOGGER.info("Loading model with joblib")
model = joblib.load(path)
LOGGER.info("Model loading complete")
return model
def clone_model(model, model_type):
"""
Clone the model using the method appropriate for its type ("keras",
"sklearn" or other). Reset the state of the model so that each train/test
split is independent.
Parameters
----------
model: Scikit-Learn/Keras Model
Model to be cloned.
model_type: String
String specifying the type of model to be cloned. Recognised options
are "keras" and "sklearn". Any other option results in the function
returning the input model, thus doing nothing.
Returns
-------
cloned: Scikit-Learn/Keras Model
The cloned model with reset state.
"""
if model_type == "keras":
cloned = keras_clone_and_compile(model)
elif model_type == "sklearn":
cloned = sklearn_clone(model)
else:
cloned = model
LOGGER.warning(
"Model type not recognised. "
+ "Cannot reset the state of the model automatically"
)
return cloned
def validate(
model, features, targets, i_train, i_test, fit_kwargs=None, predict_kwargs=None
):
"""
Fit the model on specific training data, and predict on specific test data.
Parameters
----------
model: sklearn/keras Model
Model to cross-validate.
features: list of np.array
Features for training/testing. For multi-input models, the list contains
multiple Numpy arrays.
targets: list of np.array
Targets for training/testing. For multi-output models, the list contains
multiple Numpy arrays.
i_train: np.array
np.array of indices corresponding to the rows used for training
i_test: np.array
np.array of indices corresponding to the rows used for testing
fit_kwargs: dict, optional, default = None
Dictionary of any additional kwargs to be used by the model during
fitting.
predict_kwargs: dict, optional, default = None
Dictionary of any additional kwargs to be used by the model during
prediction.
Returns
--------
predictions: np.array
Model predictions.
"""
if fit_kwargs is None:
fit_kwargs = {}
if predict_kwargs is None:
predict_kwargs = {}
LOGGER.info("Training the model")
features_train = [x[i_train] for x in features]
targets_train = [y[i_train] for y in targets]
if len(features_train) == 1:
features_train = features_train[0].copy()
if len(targets_train) == 1:
targets_train = targets_train[0].copy()
model.fit(features_train, targets_train, **fit_kwargs)
LOGGER.info("Generating model predictions")
features_test = [x[i_test] for x in features]
if len(features_test) == 1:
features_test = features_test[0].copy()
predictions = model.predict(features_test, **predict_kwargs)
return np.array(predictions)
@click.command()
@click.argument("input_paths")
def main(input_paths):
"""
Validate the model for the different train/test splits corresponding to the
input file paths.
Parameters
----------
input_paths: String
String that defines the paths to load job instructions from. Distinct
paths are separated by a colon ":".
"""
# Get a list of input file paths.
input_paths = [x.strip() for x in input_paths.split(":")]
# Load data.
LOGGER.info("Loading features and targets from disk")
with open(input_paths[0], "r") as f:
_instructions = json.load(f)
with open(_instructions["features_path"], "r") as f:
features = json.load(f)
with open(_instructions["targets_path"], "r") as f:
targets = json.load(f)
# Convert datasets to Numpy arrays.
features = [np.array(x) for x in features]
targets = [np.array(y) for y in targets]
# Iterate over train/test splits.
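    # Clearing here starts the first split from a fresh TF/Keras session, so
    # no leftover graph state leaks into it.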
K.clear_session()
for input_path in input_paths:
with open(input_path, "r") as f:
instructions = json.load(f)
LOGGER.info("Processing split {}".format(instructions["split_id"]))
# Load model.
archetype = load_model(instructions["model_path"], instructions["model_type"])
# Reset the state of the model to ensure
# that all splits are independent.
LOGGER.info("Cloning the model. Resetting the state of the model")
model = clone_model(archetype, instructions["model_type"])
# Run validation on specific training and testing datasets.
predictions = validate(
model,
features,
targets,
instructions["training_indices"],
instructions["test_indices"],
fit_kwargs=instructions["fit_kwargs"],
predict_kwargs=instructions["predict_kwargs"],
)
# Save the predictions alongside an identifier.
output_dir = os.path.dirname(input_path)
output_path_predictions = os.path.join(output_dir, "output.json")
LOGGER.info("Saving predictions to {}".format(output_path_predictions))
with open(output_path_predictions, "w") as f:
json.dump({instructions["split_id"]: predictions.tolist()}, f)
# Clear session to avoid memory build-up.
K.clear_session()
del model
del archetype
if __name__ == "__main__":
main()
| 29.859729 | 86 | 0.641612 | 0 | 0 | 0 | 0 | 2,417 | 0.366268 | 0 | 0 | 3,381 | 0.51235 |
6fb91a03150c13b0761acd74d5215ec8826d01b1 | 2,262 | py | Python | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
]
| null | null | null | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
]
| null | null | null | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
]
| 1 | 2021-02-06T15:47:04.000Z | 2021-02-06T15:47:04.000Z | from bs4 import BeautifulSoup
import requests
import math
import time
start_url='https://www.macys.com'
domain='https://www.macys.com'
''' get soup '''
def get_soup(url):
# get contents from url
content=''
while content=='':
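        # Retry until the request succeeds; on any network error, wait 5 s and try again.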
try:
content = requests.get(url,
headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}).content
except:
time.sleep(5)
continue
return BeautifulSoup(content,'lxml') # choose lxml parser
'''find all anchor tags'''
def findAllATags(url):
soup = get_soup(url)
a_tags = soup.findAll('a')
    a_tags = [a for a in a_tags if 'href' in a.attrs and a.attrs['href'].startswith('/shop')]
return a_tags
'''print all 'title' attributes'''
def printTitles(url,f):
soup=get_soup(domain+url)
temp=[i.find('a') for i in soup.findAll('div',{'class':'productThumbnailImage'})]
for i in temp:
f.write(i['title']+'\n')
'''iterate through all pages for each soup object'''
def pagination(count, url,f,u):
count_=math.ceil(count/60)
i=2
printTitles(url,f)
u.write(url+'\n')
while i<=count_:
printTitles(url.replace("?","/Pageindex/"+str(i)+"?"),f)
i+=1
'''filehandlers for output.txt and urlHandler.txt'''
def fileHandler():
f=open('output.txt','a')
return f
def urlHandler():
f=open('urlHandler.txt','a')
return f
'''generates soup object for each url'''
def getItems(url):
    soup = get_soup(domain+url)
    # open the handlers before the try block so they always exist in finally
    f = fileHandler()
    u = urlHandler()
    try:
        f.write(soup.find('span', {'id': 'currentCategory'}).text+'\n')
        pagination(int(soup.find('span', {'id': 'productCount'}).text), url, f, u)
    except (AttributeError, ValueError):
        # non-listing pages lack the category/count spans; skip them
        pass
    finally:
        f.close()
        u.close()
'''main function'''
if __name__=='__main__':
start_time=time.time()
items=[]
tags=findAllATags(url=start_url)
    '''execute getItems for tags[12:] because the first 12 anchors carry no relevant information'''
for i in tags[12:]:
getItems(i.attrs['href'])
print(time.time()-start_time)
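# Hedged example (added for illustration; not part of the original script):
# pagination() rewrites the query separator into Macy's "/Pageindex/<n>?"
# form, so a listing URL pages like this (the path is a made-up example):
#
#   "/shop/dresses?id=5449".replace("?", "/Pageindex/2?")
#   # -> "/shop/dresses/Pageindex/2?id=5449"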
| 29.763158 | 188 | 0.59107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 758 | 0.335102 |
6fb98dc934dbec999a997933b56e8bd368d485a6 | 1,243 | py | Python | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | ["Apache-2.0"] | 21 | 2016-01-20T09:33:14.000Z | 2021-12-20T19:19:45.000Z | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | ["Apache-2.0"] | 75 | 2016-02-26T16:29:58.000Z | 2022-03-21T12:35:13.000Z | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | ["Apache-2.0"] | 77 | 2016-01-22T08:44:26.000Z | 2022-02-01T15:54:56.000Z |
from django.conf.urls import url
from contactnetwork import views
# from django.views.generic import TemplateView
urlpatterns = [
url(r'^clusteringdata$', views.ClusteringData, name='clusteringdata'),
url(r'^clustering$', views.Clustering, name='clustering'),
url(r'^structure_clustering$', views.Clustering, name='clustering'),
url(r'^distances', views.ShowDistances, name='distances'),
url(r'^distancedatagroups', views.DistanceDataGroups, name='distancedatagroups'),
url(r'^distancedata', views.DistanceData, name='distancedata'),
url(r'^interactions[/]?$', views.Interactions, name='interactions'),
url(r'^comparative_analysis[/]?$', views.Interactions, name='interactions'),
url(r'^interactiondata', views.InteractionData, name='interactiondata'),
url(r'^browser[/]?$', views.InteractionBrowser, name='interactionsbrowser'),
url(r'^browserdata', views.InteractionBrowserData, name='interactionsbrowserdata'),
url(r'^state_contacts[/]?$', views.StateContacts, name='statecontacts'),
url(r'^pdbtreedata', views.PdbTreeData, name='pdbtreedata'),
url(r'^pdbtabledata', views.PdbTableData, name='pdbtabledata'),
url(r'^pdb/(?P<pdbname>\w+)$', views.ServePDB, name='serve_pdb'),
]
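# Hedged usage sketch (added for illustration; not part of the original
# module). Assuming this urlconf is included at the project root, a named
# route above can be reversed like so (the PDB code is a placeholder):
#
#   from django.urls import reverse
#   reverse('serve_pdb', kwargs={'pdbname': '1ABC'})  # -> '/pdb/1ABC'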
| 51.791667 | 87 | 0.722446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.454545 |
6fb9c69f0b059e1e502f8dc0059898eb346e1f36 | 320 | py | Python | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | ["MIT"] | null | null | null | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | ["MIT"] | null | null | null | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | ["MIT"] | null | null | null |
course = "Python Programming"
print(course.upper())
print(course.lower())
print(course.title())
course = " Python Programming"
print(course)
print(course.strip())
print(course.find("Pro"))
print(course.find("pro"))
print(course.replace("P", "-"))
print("Programming" in course)
print("Programming" not in course)
| 18.823529 | 34 | 0.709375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.26875 |
6fbc9ebec03dc29c3a7fc5b24ded057f188d61a0 | 1,401 | py | Python | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | ["MIT"] | null | null | null | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | ["MIT"] | null | null | null | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | ["MIT"] | null | null | null |
import re
from data_transformer.views import stringIsInteger
def validateEmailFormat(email):
emailPattern = r'^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$'
if(re.search(emailPattern, email)):
return True
return False
def validatePhoneFormat(phone):
if not stringIsInteger(phone):
return False
    # Valid Nigerian formats: international +234********** (14 chars),
    # 234********** (13 chars), or local e.g. 081******** (11 chars).
if phone.startswith('+'):
return len(phone) == 14
elif phone.startswith('234'):
return len(phone) == 13
else:
return len(phone) == 11
def validateThatAStringIsClean(value):
regex = re.compile(r'[@_!#$%^&*()<>?/\|}{~:]')
return (regex.search(value) == None)
def validateThatStringIsEmpty(value):
    # NOTE: despite its name, this returns True when the string is NOT empty.
    return (len(value.strip()) > 0)
def validateThatStringIsEmptyAndClean(value):
is_clean = (re.compile(r'[@_!#$%^&*()<>?/\|}{~:]').search(value) == None)
not_empty = (len(value.strip()) != 0)
return (is_clean and not_empty)
def validateThatListIsEmpty(value):
    # NOTE: despite its name, this returns True when the list is NOT empty.
    return (len(value) > 0)
def validateKeys(payload, requiredKeys):
# extract keys from payload
payloadKeys = list(payload.keys())
    # collect every required key that is missing from the payload
missingKeys = []
for key in requiredKeys:
if key not in payloadKeys:
missingKeys.append(key)
return missingKeys
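# Hedged usage sketch (added for illustration; not part of the original
# module; assumes stringIsInteger() accepts plain digit strings):
if __name__ == "__main__":
    payload = {"email": "[email protected]", "phone": "08012345678"}
    print(validateEmailFormat(payload["email"]))               # True
    print(validatePhoneFormat(payload["phone"]))               # True
    print(validateKeys(payload, ["email", "phone", "name"]))   # ['name']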
| 24.155172 | 87 | 0.635974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.201999 |
6fbe42378fbc286f445856d3f64bebf5d1265f7a | 1,173 | py | Python | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | ["MIT"] | 43 | 2020-12-14T18:19:15.000Z | 2022-03-30T05:57:43.000Z | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | ["MIT"] | 3 | 2021-02-19T09:56:35.000Z | 2022-03-30T13:26:50.000Z | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | ["MIT"] | 16 | 2020-12-14T02:49:35.000Z | 2022-02-15T10:39:39.000Z |
from pydantic import BaseModel, Field, EmailStr
class PostSchema(BaseModel):
id: int = Field(default=None)
title: str = Field(...)
content: str = Field(...)
class Config:
schema_extra = {
"example": {
"title": "Securing FastAPI applications with JWT.",
"content": "In this tutorial, you'll learn how to secure your application by enabling authentication using JWT. We'll be using PyJWT to sign, encode and decode JWT tokens...."
}
}
class UserSchema(BaseModel):
fullname: str = Field(...)
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"fullname": "Abdulazeez Abdulazeez Adeshina",
"email": "[email protected]",
"password": "weakpassword"
}
}
class UserLoginSchema(BaseModel):
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"email": "[email protected]",
"password": "weakpassword"
}
}
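# Hedged usage sketch (added for illustration; not part of the original
# module; EmailStr requires the optional email-validator dependency):
if __name__ == "__main__":
    user = UserSchema(
        fullname="Abdulazeez Abdulazeez Adeshina",
        email="[email protected]",
        password="weakpassword",
    )
    print(user.dict())  # validated payload as a plain dict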
| 27.27907 | 191 | 0.535379 | 1,117 | 0.952259 | 0 | 0 | 0 | 0 | 0 | 0 | 388 | 0.330776 |
6fbf3fd6784e5fc825699ad96db6d0683069d114 | 188 | py | Python | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | ["MIT"] | 11 | 2015-07-06T03:48:00.000Z | 2022-03-08T15:45:02.000Z | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | ["MIT"] | 2 | 2018-03-30T17:11:53.000Z | 2018-03-31T10:49:22.000Z | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | ["MIT"] | 6 | 2015-07-10T23:12:38.000Z | 2021-07-05T05:17:22.000Z |
__version__ = '1.0.2'
__author__ = 'Christian Musa <[email protected]>'
__url__ = 'https://github.com/crash7/griffin-powermate'
__all__ = []
from griffin_powermate import *
| 31.333333 | 59 | 0.718085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.5 |
6fc09d38fc6e352436484c44be3ed1f477d458b5 | 109 | py | Python | servermn/core/__init__.py | masterhung0112/servermn | d518f2fa394bb3e22c29e74802357c2aa054f392 | ["Unlicense"] | null | null | null | servermn/core/__init__.py | masterhung0112/servermn | d518f2fa394bb3e22c29e74802357c2aa054f392 | ["Unlicense"] | null | null | null | servermn/core/__init__.py | masterhung0112/servermn | d518f2fa394bb3e22c29e74802357c2aa054f392 | ["Unlicense"] | null | null | null |
def init():
# Set locale environment
# Set config
# Set user and group
# init logger
    pass
| 18.166667 | 28 | 0.59633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.633028 |
6fc12a48d65fbf13cb0aa456154ff1b694f2a2ec | 3,142 | py | Python | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | ["Apache-2.0"] | 1 | 2020-10-28T01:19:16.000Z | 2020-10-28T01:19:16.000Z | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | ["Apache-2.0"] | null | null | null | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | ["Apache-2.0"] | null | null | null |
import unittest
from parameterized import parameterized
import os
from integration_tests.dataproc_test_case import DataprocTestCase
METADATA = 'flink-start-yarn-session=false'
class FlinkTestCase(DataprocTestCase):
COMPONENT = 'flink'
INIT_ACTION = 'gs://dataproc-initialization-actions/flink/flink.sh'
TEST_SCRIPT_FILE_NAME = 'validate.sh'
def verify_instance(self, name, yarn_session=True):
self.upload_test_file(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
self.TEST_SCRIPT_FILE_NAME
),
name)
self.__run_test_file(name, yarn_session)
self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)
def __run_test_file(self, name, yarn_session):
        if yarn_session:
cmd = 'gcloud compute ssh {} -- "bash {}"'.format(
name,
self.TEST_SCRIPT_FILE_NAME
)
else:
cmd = 'gcloud compute ssh {} -- "bash {} {}"'.format(
name,
self.TEST_SCRIPT_FILE_NAME,
yarn_session
)
ret_code, stdout, stderr = self.run_command(cmd)
self.assertEqual(ret_code, 0, "Failed to run test file. Error: {}".format(stderr))
@parameterized.expand([
("SINGLE", "1.1", ["m"], METADATA),
("STANDARD", "1.1", ["m"], None),
("HA", "1.1", ["m-0", "m-1", "m-2"], None),
("SINGLE", "1.2", ["m"], METADATA),
("STANDARD", "1.2", ["m"], None),
("HA", "1.2", ["m-0", "m-1", "m-2"], None),
("SINGLE", "1.3", ["m"], METADATA),
("STANDARD", "1.3", ["m"], None),
("HA", "1.3", ["m-0", "m-1", "m-2"], None),
], testcase_func_name=DataprocTestCase.generate_verbose_test_name)
def test_flink(self, configuration, dataproc_version, machine_suffixes, metadata):
self.createCluster(configuration, self.INIT_ACTION, dataproc_version, metadata=metadata)
for machine_suffix in machine_suffixes:
self.verify_instance(
"{}-{}".format(
self.getClusterName(),
machine_suffix
)
)
@parameterized.expand([
("STANDARD", "1.1", ["m"], METADATA),
("HA", "1.1", ["m-0", "m-1", "m-2"], METADATA),
("STANDARD", "1.2", ["m"], METADATA),
("HA", "1.2", ["m-0", "m-1", "m-2"], METADATA),
("SINGLE", "1.3", ["m"], METADATA),
("STANDARD", "1.3", ["m"], None),
("HA", "1.3", ["m-0", "m-1", "m-2"], None),
], testcase_func_name=DataprocTestCase.generate_verbose_test_name)
def test_flink_with_optional_metadata(self, configuration, dataproc_version, machine_suffixes, metadata):
self.createCluster(configuration, self.INIT_ACTION, dataproc_version, metadata=metadata)
for machine_suffix in machine_suffixes:
self.verify_instance(
"{}-{}".format(
self.getClusterName(),
machine_suffix
),
yarn_session=False
)
if __name__ == '__main__':
unittest.main()
| 37.404762 | 109 | 0.559198 | 2,912 | 0.926798 | 0 | 0 | 1,819 | 0.578931 | 0 | 0 | 556 | 0.176957 |
6fc1e91d9ee7bd81df3b499400e72d6a896fdb9d | 4,017 | py | Python | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | ["Apache-2.0"] | 407 | 2017-10-25T14:24:25.000Z | 2022-03-31T08:02:01.000Z | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | ["Apache-2.0"] | 227 | 2017-11-02T07:15:38.000Z | 2022-01-19T02:29:51.000Z | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | ["Apache-2.0"] | 199 | 2017-10-26T07:26:40.000Z | 2022-03-27T20:02:13.000Z |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test_plan
import settings
class Module(test_plan.Testplan):
runScript = settings.KMD_RUNSCRIPT
deviceTargets = ['sim', 'ufpga']
def __init__(self):
super(Module, self).__init__(__name__)
# Convenience globals
kmd = Module.runScript
devices = Module.deviceTargets
ces = ["Core Engine Scheduler"]
nn = ["Neural Network"]
convd = ["CONV HW - Direct"]
convi = ["CONV HW - Image"]
convw = ["CONV HW - Winograd"]
convp = ["CONV HW - Pipeline"]
sdpx1 = ["SDP X1 HW"]
sdpx2 = ["SDP X2 HW"]
sdpy = ["SDP Y HW"]
sdpf = ["SDP HW - Full"]
cdp = ["CDP HW"]
pdp = ["PDP HW"]
def registerNvSmallTests(self, testplan):
testplan.append(
[0, "Written", kmd, "CONV_D_L0_0_small", None, convd, devices, "Convolution test - Sanity test direct convolution",
"Direct convolution, 8x8x128 input cube, 3x3x128 kernel cube and 32 kernels input and weight read from DRAM, no mean and bias data, output written to DRAM through SDP."])
testplan.append(
[0, "Written", kmd, "SDP_X1_L0_0_small", None, sdpx1, devices,
"SDP test - Sanity test for SDP, only X1 enabled with ALU, X2 and Y disable. No DMA used",
"Element wise sum operation in X1, 8x8x32 input cube and 8x8x32 bias cube. Activation function as ReLU"])
testplan.append(
[0, "Written", kmd, "CDP_L0_0_small", None, cdp, devices, "CDP test - Sanity test for CDP",
"Use only linear table with LUT configured with all 1. 8x8x32 input cube and 8x8x32 output cube."])
testplan.append(
[0, "Written", kmd, "PDP_L0_0_small", None, pdp, devices, "PDP test - Sanity test for PDP with max pooling",
"Max pooling, 8x8x32 input cube, 8x8x32 output cube, no padding, 1x1 kernel size. No need to compare data. It is enough if task succeeds to pass this test."])
testplan.append(
[0, "Written", kmd, "NN_L0_1_small", None, nn, devices, "AlexNet", "AlexNet"])
def registerFirmwareSmallTests(self):
testplan = []
registerNvSmallTests(self, testplan)
for item in testplan:
test = test_plan.Test()
test.level = item[0]
test.status = item[1]
test.runscript = item[2]
test.name = item[3]
test.options = item[4]
test.features = item[5]
test.targets = item[6]
test.description = item[7]
test.dependencies = None
self.add_test(test)
def registerTests(self):
registerFirmwareSmallTests(self)
Module.register_tests = registerTests
| 42.284211 | 179 | 0.707742 | 181 | 0.045059 | 0 | 0 | 0 | 0 | 0 | 0 | 2,611 | 0.649988 |
6fc2f27600b643c88618b8d387f08bcf982ed303 | 790 | py | Python | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | ["MIT"] | null | null | null | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | ["MIT"] | 17 | 2020-02-22T19:05:00.000Z | 2020-05-20T10:15:01.000Z | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | ["MIT"] | 4 | 2020-02-28T12:47:18.000Z | 2020-03-06T13:00:20.000Z |
import numpy as np
from typing import List, Tuple
from .base import _DataSet
class NarrowPeakDataSet(_DataSet):
"""
The NarrowPeakDataSet expects that narrowPeak files have been added to the DataBase.
"""
SELECT_LABEL = (
" Bed.ChromosomeId, Bed.ConditionId, BedVirtual_{assembly}.ChromStart, Bed.Peak"
)
def array_from_query(
self, query: List[Tuple[int, int, int]], chromstart: int, chromend: int,
) -> np.ndarray:
positions = np.zeros((len(self.all_conditions), self.inner_range), dtype=bool)
for condition_id, start, peak in query:
peak_idx = int(start - chromstart + peak)
if 0 <= peak_idx < positions.shape[1]:
positions[condition_id, peak_idx] = True
return positions
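    # Hedged illustration (added; not part of the original class): the loop
    # above marks one boolean per (condition, peak-summit) position, e.g.
    #
    #   positions = np.zeros((2, 10), dtype=bool)  # 2 conditions, range 10
    #   chromstart = 100
    #   query = [(0, 102, 3), (1, 95, 2)]
    #   # (0, 102, 3): idx = 102 - 100 + 3 = 5  -> positions[0, 5] = True
    #   # (1, 95, 2):  idx = 95 - 100 + 2 = -3  -> out of range, skipped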
| 29.259259 | 88 | 0.653165 | 709 | 0.897468 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.227848 |
6fc517dc6f5fe11349e844166e7ed87944abff49 | 2,654 | py | Python | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | ["MIT"] | 1 | 2022-02-22T00:20:00.000Z | 2022-02-22T00:20:00.000Z | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | ["MIT"] | null | null | null | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2018-09-09 23:06:06 +0100 (Sun, 09 Sep 2018)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish # pylint: disable=line-too-long
#
# https://www.linkedin.com/in/harisekhon
#
"""
Strip ANSI Escape Codes from Text String input
Works as a standard unix filter program, reading from file arguments or standard input and printing to standard output
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes
from harisekhon import CLI
except ImportError as _:
print('module import failed: %s' % _, file=sys.stderr)
print("Did you remember to build the project by running 'make'?", file=sys.stderr)
print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.2'
# pylint: disable=too-few-public-methods
class StripAnsiEscapeCodes(CLI):
# def __init__(self):
# # Python 2.x
# super(StripAnsiEscapeCodes, self).__init__()
# # Python 3.x
# # super().__init__()
def run(self):
if not self.args:
self.args.append('-')
for arg in self.args:
if arg == '-':
continue
if not os.path.exists(arg):
print("'%s' not found" % arg)
sys.exit(ERRORS['WARNING'])
if os.path.isfile(arg):
log_option('file', arg)
elif os.path.isdir(arg):
log_option('directory', arg)
else:
die("path '%s' could not be determined as either a file or directory" % arg)
for filename in self.args:
if filename == '-':
for line in sys.stdin:
print(strip_ansi_escape_codes(line), end='')
else:
with open(filename) as filehandle:
for line in filehandle:
print(strip_ansi_escape_codes(line), end='')
if __name__ == '__main__':
StripAnsiEscapeCodes().main()
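# Hedged usage examples (added for illustration; based on the filter-style
# run() above, which reads file arguments or '-' for standard input):
#
#   ./strip_ansi_escape_codes.py colored_output.log > clean.log
#   ls --color=always | ./strip_ansi_escape_codes.py -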
| 31.223529 | 119 | 0.637905 | 1,100 | 0.414469 | 0 | 0 | 0 | 0 | 0 | 0 | 1,198 | 0.451394 |
6fc58731a5e67b957a08a7b99ed3506623297e19 | 301 | py | Python | vk_bot/mods/other/counting.py | triangle1984/GLaDOS | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | ["BSD-3-Clause"] | 3 | 2019-12-12T05:48:34.000Z | 2020-12-07T19:23:41.000Z | vk_bot/mods/other/counting.py | anar66/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | ["BSD-3-Clause"] | 1 | 2019-11-15T14:28:49.000Z | 2019-11-15T14:28:49.000Z | vk_bot/mods/other/counting.py | triangle1984/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | ["BSD-3-Clause"] | 5 | 2019-11-20T14:20:30.000Z | 2022-02-05T10:37:01.000Z |
from vk_bot.core.modules.basicplug import BasicPlug
import time
class Counting(BasicPlug):
command = ("отсчет",)
doc = "Отсчет от 1 до 3"
def main(self):
for x in range(3, -1, -1):
if x == 0:
return
self.sendmsg(x)
time.sleep(1)
| 25.083333 | 51 | 0.538206 | 252 | 0.794953 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.132492 |
6fc63d77d8ed73c401918b676d06084cc00b6c87 | 954 | py | Python | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | ["UPL-1.0", "Apache-2.0"] | 1 | 2022-02-07T15:56:24.000Z | 2022-02-07T15:56:24.000Z | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | ["UPL-1.0", "Apache-2.0"] | null | null | null | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | ["UPL-1.0", "Apache-2.0"] | 1 | 2022-02-18T01:23:46.000Z | 2022-02-18T01:23:46.000Z |
## Copyright © 2021, Oracle and/or its affiliates.
## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#!/usr/bin/env python
from setuptools import setup
setup(name='wind-marketplace-library',
version="1.0.0",
description='Robot Framework test library for OCI Marketplace',
long_description='Robot Framework test library for OCI Marketplace',
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Framework :: WIND Robot Framework',
],
author='[email protected]',
author_email='[email protected]',
packages=['MarketplaceLibrary'],
license = "UPL-1.0",
install_requires=[
],
extras_require={
'dev': [
]
},
platforms='any',
include_package_data=True,
      zip_safe=False)
| 31.8 | 105 | 0.634172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 540 | 0.565445 |
6fc8616e9f969ac9e3ee973ff4f5b06bdc7a38e6 | 17 | py | Python | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | ["MIT"] | null | null | null | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | ["MIT"] | null | null | null | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | ["MIT"] | null | null | null |
__version__="0.3"
| 17 | 17 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.294118 |
6fc8809070d19daecb0e75b0cf66f5240983ed79 | 1,392 | py | Python | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 8 | 2019-08-27T20:08:22.000Z | 2021-07-23T22:49:47.000Z | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 73 | 2020-03-11T18:07:29.000Z | 2022-03-28T18:07:47.000Z | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 4 | 2020-02-22T19:44:17.000Z | 2022-03-08T09:42:45.000Z |
from django.http import JsonResponse
from django.shortcuts import reverse
from django.urls import NoReverseMatch
from django.views import View
from rest_framework import __version__ as drf_version
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from oilandrope import __version__
class ApiVersionView(View):
http_method_names = ['get']
data = {
'version': __version__,
'powered_by': 'Django Rest Framework',
'drf_version': drf_version,
}
def get(self, request, *args, **kwargs):
return JsonResponse(self.data)
class URLResolverViewSet(ViewSet):
"""
Returns URL with given resolver and params.
"""
permission_classes = [AllowAny]
def resolve_url(self, request, *args, **kwargs):
data = request.data.copy()
if 'resolver' not in data:
            raise ValidationError({'resolver': 'This field is required.'})
resolver = data.pop('resolver')
if isinstance(resolver, list):
resolver = resolver[0]
extra_params = {}
for key, value in data.items():
extra_params[key] = value
try:
url = reverse(resolver, kwargs=extra_params)
except NoReverseMatch:
url = '#no-url'
return Response({'url': url})
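    # Hedged usage sketch (added for illustration; not part of the original
    # module). Assuming `resolve_url` is routed somewhere like /api/resolver/,
    # a client could POST:
    #
    #   {"resolver": "serve_pdb", "pdbname": "1ABC"}
    #
    # and receive {"url": "/pdb/1ABC"}, or {"url": "#no-url"} when the name
    # or parameters do not reverse (the names here are placeholders).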
| 27.294118 | 56 | 0.66954 | 962 | 0.691092 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.111351 |
6fc9a185be50739efc8e58ecf9750c6070063f42 | 1,525 | py | Python | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | ["MIT"] | 3 | 2021-05-22T02:03:57.000Z | 2021-05-23T07:45:01.000Z | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | ["MIT"] | null | null | null | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | ["MIT"] | null | null | null |
from verta import Client
import pickle
import mlflow
import mlflow.sklearn
from mlflow.tracking import MlflowClient
import os
def downloadArtifact(proj,exp_name,exp_run, serialization):
client = Client("http://localhost:3000")
proj = client.set_project(proj)
expt = client.set_experiment(exp_name)
run = client.set_experiment_run(exp_run)
if serialization.lower() == 'pickle':
run.download_model('model.pkl')
def logModel(library, modelName):
    with open('./model.pkl', 'rb') as infile:
        model = pickle.load(infile)
    print('Loaded Model')
mlflow.set_tracking_uri("sqlite:///mlruns.db")
if library.lower() == 'pytorch':
        mlflow.pytorch.log_model(model, "covid-predictor", registered_model_name=modelName)
# mlflow.tensorflow.log_model (tf_saved_model_dir='.',registered_model_name=modelName,tf_meta_graph_tags=[],tf_signature_def_key='covid-predictor', artifact_path='model_dir/')
client = MlflowClient()
client.transition_model_version_stage(
name=modelName,
version=1,
stage="Production"
)
    print('Logged model')
def serveModel(modelName):
os.environ["MLFLOW_TRACKING_URI"]="sqlite:///mlruns.db"
os.system("mlflow models serve -m models:/CovidPredictor/production -p 2000 --no-conda")
# Function calls: download the pickled artifact from Verta, then register it with MLflow
downloadArtifact("MajorII","CovidPredictor","Version 1","pickle")
logModel("pytorch","CovidPredictor")
#serveModel("CovidPredictor")
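# Hedged example (added for illustration): once serveModel() is running,
# the registered model could be queried over HTTP; the exact request schema
# depends on the MLflow version, but would look roughly like:
#
#   curl -X POST http://localhost:2000/invocations \
#        -H 'Content-Type: application/json' \
#        -d '{"inputs": [[0.1, 0.2, 0.3]]}'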
| 36.309524 | 182 | 0.72459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.415082 |
6fc9dcd451ff1e384504442cc9b3cb27f7b5e6af | 3,832 | py | Python | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | ["Naumen", "Condor-1.1", "MS-PL"] | 94 | 2015-02-04T13:57:34.000Z | 2021-11-01T15:10:06.000Z | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | ["Naumen", "Condor-1.1", "MS-PL"] | 233 | 2015-01-28T15:16:46.000Z | 2021-08-23T11:31:37.000Z | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | ["Naumen", "Condor-1.1", "MS-PL"] | 95 | 2015-02-13T15:53:30.000Z | 2021-11-02T14:54:54.000Z |
#!/usr/bin/env python
#*****************************************************************************
# dump_db.py
#
# Dump inventory database in XML format
#
# (c) 2006 Andres Heinloo, GFZ Potsdam
# (c) 2007 Mathias Hoffmann, GFZ Potsdam
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version. For more information, see http://www.gnu.org/
#*****************************************************************************
import sys
from seiscomp import logs
from seiscomp.db.seiscomp3 import sc3wrap
from seiscomp.db.seiscomp3.inventory import Inventory as SC3Inventory
from seiscomp.db.seiscomp3.routing import Routing as SC3Routing
from seiscomp3 import Core, Client, DataModel, Logging
VERSION = "1.2 (2012.313)"
class DumpDB(Client.Application):
def __init__(self, argc, argv):
Client.Application.__init__(self, argc, argv)
self.routingMode = False
self.addAccess = False
self.output_file = None
self.setLoggingToStdErr(True)
self.setMessagingEnabled(True)
self.setDatabaseEnabled(True, True)
self.setAutoApplyNotifierEnabled(False)
self.setInterpretNotifierEnabled(False)
self.setPrimaryMessagingGroup("LISTENER_GROUP")
def createCommandLineDescription(self):
Client.Application.createCommandLineDescription(self)
self.commandline().addGroup("ArcLink")
self.commandline().addOption("ArcLink", "routing", "dump routing instead of inventory")
self.commandline().addOption("ArcLink", "with-access", "dump access together with routing information")
def validateParameters(self):
try:
if self.commandline().hasOption("routing"):
self.routingMode = True
if self.commandline().hasOption("with-access"):
self.addAccess = True
args = self.commandline().unrecognizedOptions()
if len(args) != 1:
print >>sys.stderr, "Usage: dump_db [options] file"
return False
self.output_file = args[0]
except Exception:
logs.print_exc()
return False
return True
def initConfiguration(self):
if not Client.Application.initConfiguration(self):
return False
# force logging to stderr even if logging.file = 1
self.setLoggingToStdErr(True)
return True
def run(self):
try:
sc3wrap.dbQuery = self.query()
DataModel.Notifier.Enable()
DataModel.Notifier.SetCheckEnabled(False)
if not self.routingMode:
self.inv = SC3Inventory(self.query().loadInventory())
self.inv.load_stations("*")
self.inv.load_stations("*", "*")
self.inv.load_stations("*", "*", "*")
self.inv.load_stations("*", "*", "*", "*")
self.inv.load_instruments()
self.inv.save_xml(self.output_file, instr=2)
else:
self.rtn = SC3Routing(self.query().loadRouting())
self.rtn.load_routes("*", "*")
if self.addAccess:
self.rtn.load_access()
self.rtn.save_xml(self.output_file, self.addAccess)
except Exception:
logs.print_exc()
return False
return True
if __name__ == "__main__":
logs.debug = Logging.debug
logs.info = Logging.info
logs.notice = Logging.notice
logs.warning = Logging.warning
logs.error = Logging.error
app = DumpDB(len(sys.argv), sys.argv)
sys.exit(app())
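# Hedged usage examples (added for illustration; the --routing and
# --with-access option names come from createCommandLineDescription()
# above, while the database flag and URI are assumptions):
#
#   python dump_db.py -d mysql://sysop:sysop@localhost/seiscomp3 inventory.xml
#   python dump_db.py --routing --with-access routing.xml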
| 33.034483 | 111 | 0.592119 | 2,684 | 0.700418 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.234081 |
6fca5a30c5488af2b1fdbc0fc7967e56d0f30371 | 619 | py | Python | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | ["MIT"] | 1 | 2022-03-28T16:07:07.000Z | 2022-03-28T16:07:07.000Z | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | ["MIT"] | null | null | null | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | ["MIT"] | null | null | null |
import logging
from typing import Optional
from y import magic
from y.datatypes import UsdPrice
from y.typing import AnyAddressType, Block
logger = logging.getLogger(__name__)
def get_price(token: AnyAddressType, block: Optional[Block] = None) -> UsdPrice:
    logger.warning(
        'ypricemagic is in the process of being migrated to y. '
        'y can do all of the same old stuff you expect, plus some new stuff. '
        'This method still works for now, but will be removed soon. '
        'Please update your scripts to use `y.get_price(token, block)`.'
    )
return magic.get_price(token, block)
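# Hedged migration example (added for illustration; the token address and
# block number are placeholders, not from the original file):
#
#   import y
#   price = y.get_price("0xTokenAddressHere", block=14_000_000)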
| 36.411765 | 85 | 0.704362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.400646 |
6fcab68471b1398dafce699d3dbe1a96583efdf6 | 4,248 | py | Python | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | ["Apache-2.0"] | 3 | 2021-09-12T05:05:28.000Z | 2021-12-30T09:45:20.000Z | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | ["Apache-2.0"] | null | null | null | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | ["Apache-2.0"] | null | null | null |
from zerver.lib.actions import do_add_realm_playground
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import RealmPlayground, get_realm
class RealmPlaygroundTests(ZulipTestCase):
def test_create_one_playground_entry(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Python playground",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
}
# Now send a POST request to the API endpoint.
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
# Check if the actual object exists
realm = get_realm("zulip")
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground").exists()
)
def test_create_multiple_playgrounds_for_same_language(self) -> None:
iago = self.example_user("iago")
data = [
{
"name": "Python playground 1",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
},
{
"name": "Python playground 2",
"pygments_language": "Python",
"url_prefix": "https://python2.example.com",
},
]
for payload in data:
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
realm = get_realm("zulip")
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground 1").exists()
)
self.assertTrue(
RealmPlayground.objects.filter(realm=realm, name="Python playground 2").exists()
)
def test_invalid_params(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Invalid URL",
"pygments_language": "Python",
"url_prefix": "https://invalid-url",
}
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(resp, "url_prefix is not a URL")
payload["url_prefix"] = "https://python.example.com"
payload["pygments_language"] = "a$b$c"
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(resp, "Invalid characters in pygments language")
def test_create_already_existing_playground(self) -> None:
iago = self.example_user("iago")
payload = {
"name": "Python playground",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
}
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_success(resp)
resp = self.api_post(iago, "/json/realm/playgrounds", payload)
self.assert_json_error(
resp, "Realm playground with this Realm, Pygments language and Name already exists."
)
def test_not_realm_admin(self) -> None:
hamlet = self.example_user("hamlet")
resp = self.api_post(hamlet, "/json/realm/playgrounds")
self.assert_json_error(resp, "Must be an organization administrator")
resp = self.api_delete(hamlet, "/json/realm/playgrounds/1")
self.assert_json_error(resp, "Must be an organization administrator")
def test_delete_realm_playground(self) -> None:
iago = self.example_user("iago")
realm = get_realm("zulip")
playground_info = dict(
name="Python playground",
pygments_language="Python",
url_prefix="https://python.example.com",
)
playground_id = do_add_realm_playground(realm, acting_user=iago, **playground_info)
self.assertTrue(RealmPlayground.objects.filter(name="Python playground").exists())
result = self.api_delete(iago, f"/json/realm/playgrounds/{playground_id + 1}")
self.assert_json_error(result, "Invalid playground")
result = self.api_delete(iago, f"/json/realm/playgrounds/{playground_id}")
self.assert_json_success(result)
self.assertFalse(RealmPlayground.objects.filter(name="Python").exists())
| 38.27027 | 96 | 0.62194 | 4,087 | 0.9621 | 0 | 0 | 0 | 0 | 0 | 0 | 1,333 | 0.313795 |
6fcb558a97ebc6e7f0af8053d4b5eef0ee0f741f | 2,733 | py | Python | seisflows/plugins/optimize/NLCG.py | niyiyu2316/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | ["BSD-2-Clause"] | 1 | 2021-12-01T07:37:57.000Z | 2021-12-01T07:37:57.000Z | seisflows/plugins/optimize/NLCG.py | niyiyu/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | ["BSD-2-Clause"] | null | null | null | seisflows/plugins/optimize/NLCG.py | niyiyu/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | ["BSD-2-Clause"] | null | null | null |
#
# This is Seisflows
#
# See LICENCE file
#
#
###############################################################################
# Import system modules
import os
# Import Numpy
import numpy as np
# Local imports
from seisflows.tools import unix
from seisflows.tools.math import dot
from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy
class NLCG:
""" Nonlinear conjugate gradient method
"""
def __init__(self, path='.', load=loadnpy, save=savenpy, thresh=1.,
maxiter=np.inf, precond=None):
self.path = path
self.load = load
self.save = save
self.maxiter = maxiter
self.thresh = thresh
self.precond = precond
try:
self.iter = loadtxt(self.path+'/'+'NLCG/iter')
except IOError:
unix.mkdir(self.path+'/'+'NLCG')
self.iter = 0
def __call__(self):
""" Returns NLCG search direction
"""
self.iter += 1
savetxt(self.path+'/'+'NLCG/iter', self.iter)
unix.cd(self.path)
g_new = self.load('g_new')
if self.iter == 1:
return -g_new, 0
elif self.iter > self.maxiter:
print('restarting NLCG... [periodic restart]')
self.restart()
return -g_new, 1
# compute search direction
g_old = self.load('g_old')
p_old = self.load('p_old')
if self.precond:
beta = pollak_ribere(g_new, g_old, self.precond)
p_new = -self.precond(g_new) + beta*p_old
else:
beta = pollak_ribere(g_new, g_old)
p_new = -g_new + beta*p_old
# check restart conditions
if check_conjugacy(g_new, g_old) > self.thresh:
print('restarting NLCG... [loss of conjugacy]')
self.restart()
return -g_new, 1
elif check_descent(p_new, g_new) > 0.:
print('restarting NLCG... [not a descent direction]')
self.restart()
return -g_new, 1
else:
return p_new, 0
def restart(self):
""" Restarts algorithm
"""
self.iter = 1
savetxt(self.path+'/'+'NLCG/iter', self.iter)
# Utility functions
def fletcher_reeves(g_new, g_old, precond=lambda x: x):
num = dot(precond(g_new), g_new)
den = dot(g_old, g_old)
beta = num/den
return beta
def pollak_ribere(g_new, g_old, precond=lambda x: x):
num = dot(precond(g_new), g_new-g_old)
den = dot(g_old, g_old)
beta = num/den
return beta
def check_conjugacy(g_new, g_old):
return abs(dot(g_new, g_old) / dot(g_new, g_new))
def check_descent(p_new, g_new):
return dot(p_new, g_new) / dot(g_new, g_new)
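# Hedged numeric sketch (added for illustration; assumes `dot` above is an
# ordinary inner product, as in seisflows.tools.math):
#
#   import numpy as np
#   g_old, g_new = np.array([1., 0.]), np.array([0., 1.])
#   beta = np.dot(g_new, g_new - g_old) / np.dot(g_old, g_old)   # 1.0
#   conj = abs(np.dot(g_new, g_old) / np.dot(g_new, g_new))      # 0.0
#   # conj > thresh would trigger the 'loss of conjugacy' restart above.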
| 24.621622 | 79 | 0.559458 | 1,861 | 0.680937 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.208196 |