hexsha (string, len 40) | size (int64, 5..2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3..248) | max_stars_repo_name (string, len 5..125) | max_stars_repo_head_hexsha (string, len 40..78) | max_stars_repo_licenses (list, len 1..10) | max_stars_count (int64, 1..191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3..248) | max_issues_repo_name (string, len 5..125) | max_issues_repo_head_hexsha (string, len 40..78) | max_issues_repo_licenses (list, len 1..10) | max_issues_count (int64, 1..67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3..248) | max_forks_repo_name (string, len 5..125) | max_forks_repo_head_hexsha (string, len 40..78) | max_forks_repo_licenses (list, len 1..10) | max_forks_count (int64, 1..105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 5..2.06M) | avg_line_length (float64, 1..1.02M) | max_line_length (int64, 3..1.03M) | alphanum_fraction (float64, 0..1) | count_classes (int64, 0..1.6M) | score_classes (float64, 0..1) | count_generators (int64, 0..651k) | score_generators (float64, 0..1) | count_decorators (int64, 0..990k) | score_decorators (float64, 0..1) | count_async_functions (int64, 0..235k) | score_async_functions (float64, 0..1) | count_documentation (int64, 0..1.04M) | score_documentation (float64, 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f073d830bc26d55a9b16a99438ab898d40254be | 3,418 | py | Python | mcpyrate/markers.py | Technologicat/mcpyrate | 8182a8d246554b152e281d0f6c912e35ea58c316 | ["MIT"] | 34 | 2020-10-13T19:22:36.000Z | 2022-01-28T00:53:55.000Z | mcpyrate/markers.py | Technologicat/mcpyrate | 8182a8d246554b152e281d0f6c912e35ea58c316 | ["MIT"] | 32 | 2020-10-16T16:29:54.000Z | 2022-01-27T15:45:51.000Z | mcpyrate/markers.py | Technologicat/mcpyrate | 8182a8d246554b152e281d0f6c912e35ea58c316 | ["MIT"] | 2 | 2020-10-17T19:07:26.000Z | 2021-02-20T01:43:50.000Z |
# -*- coding: utf-8; -*-
"""AST markers for internal communication.
*Internal* here means they are never to be passed to Python's `compile`;
macros may use them to work together.
"""
__all__ = ["ASTMarker", "get_markers", "delete_markers", "check_no_markers_remaining"]
import ast
from . import core, utils, walkers
class ASTMarker(ast.AST):
"""Base class for AST markers.
Markers are AST-node-like objects meant for communication between
co-operating, related macros. They are also used by the macro expander
to talk with itself during expansion.
We inherit from `ast.AST`, so that during macro expansion, a marker
behaves like a single AST node.
It is a postcondition of a completed macro expansion that no markers
remain in the AST.
To help fail-fast, if you define your own marker types, use `get_markers`
to check (at an appropriate point) that the expanded AST has no instances
of your own markers remaining. (You'll want a base class for your own markers.)
A typical usage example is in the quasiquote system, where the unquote
operators (some of which expand to markers) may only appear inside a quoted
section. So just before the quote operator exits, it checks that all
quasiquote markers within that section have been compiled away.
"""
# TODO: Silly default `None`, because `copy` and `deepcopy` call `__init__` without arguments,
# TODO: though the docs say they behave like `pickle` (and wouldn't thus need to call __init__ at all!).
def __init__(self, body=None):
"""body: the actual AST that is annotated by this marker"""
self.body = body
self._fields = ["body"] # support ast.iter_fields
def get_markers(tree, cls=ASTMarker):
"""Return a `list` of any `cls` instances found in `tree`. For output validation."""
class ASTMarkerCollector(walkers.ASTVisitor):
def examine(self, tree):
if isinstance(tree, cls):
self.collect(tree)
self.generic_visit(tree)
w = ASTMarkerCollector()
w.visit(tree)
return w.collected
def delete_markers(tree, cls=ASTMarker):
"""Delete any `cls` ASTMarker instances found in `tree`.
The deletion takes place by replacing each marker node with
the actual AST node stored in its `body` attribute.
"""
class ASTMarkerDeleter(walkers.ASTTransformer):
def transform(self, tree):
if isinstance(tree, cls):
return self.visit(tree.body)
return self.generic_visit(tree)
return ASTMarkerDeleter().visit(tree)
def check_no_markers_remaining(tree, *, filename, cls=None):
"""Check that `tree` has no AST markers remaining.
    If a class `cls` is provided, only check for markers that are instances of `cls`.
If there are any, raise `MacroExpansionError`.
No return value.
`filename` is the full path to the `.py` file, for error reporting.
Convenience function.
"""
cls = cls or ASTMarker
remaining_markers = get_markers(tree, cls)
if remaining_markers:
codes = [utils.format_context(node, n=5) for node in remaining_markers]
locations = [utils.format_location(filename, node, code) for node, code in zip(remaining_markers, codes)]
report = "\n\n".join(locations)
raise core.MacroExpansionError(f"{filename}: AST markers remaining after expansion:\n{report}")
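# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# It exercises only the helpers defined above: wrap a node in an `ASTMarker`, find it
# with `get_markers`, then strip it again with `delete_markers`.
if __name__ == "__main__":  # pragma: no cover - demonstration only
    demo_tree = ast.parse("x + 1")
    demo_marker = ASTMarker(demo_tree.body[0])       # annotate the first statement
    demo_tree.body[0] = demo_marker
    assert get_markers(demo_tree) == [demo_marker]   # the walker finds the marker
    demo_tree = delete_markers(demo_tree)            # marker replaced by its `body`
    assert not get_markers(demo_tree)                # postcondition: no markers remain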
| 37.977778 | 113 | 0.693681 | 1,792 | 0.524283 | 0 | 0 | 0 | 0 | 0 | 0 | 2,159 | 0.631656 |
6f0871e5f1835b667efee97ba793562fead702a2 | 1,960 | py | Python | lambda.py | deepanshu-yadav/NSFW-Classifier | ec6a98eb982ec30c2a21ca11dc92d580cc8a8981 | ["MIT"] | 13 | 2019-09-18T18:32:17.000Z | 2022-03-01T08:01:18.000Z | lambda.py | deepanshu-yadav/NSFW-Classifier | ec6a98eb982ec30c2a21ca11dc92d580cc8a8981 | ["MIT"] | null | null | null | lambda.py | deepanshu-yadav/NSFW-Classifier | ec6a98eb982ec30c2a21ca11dc92d580cc8a8981 | ["MIT"] | 4 | 2020-03-27T10:00:52.000Z | 2021-04-23T03:30:43.000Z |
import boto3
import json
import numpy as np
import base64, os, boto3, ast, json
endpoint = 'myprojectcapstone'
def format_response(message, status_code):
return {
'statusCode': str(status_code),
'body': json.dumps(message),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
}
def lambda_handler(event, context):
try :
body = json.loads(event['body'])
image = base64.b64decode(body['data'].replace('data:image/png;base64,', ''))
try :
runtime = boto3.Session().client(service_name='sagemaker-runtime', region_name='us-east-2')
response = runtime.invoke_endpoint(EndpointName=endpoint, ContentType='application/x-image', Body=image)
print(response)
try:
probs = response['Body'].read()
probs = json.loads(probs)
#probs = ast.literal_eval(probs)
#pred = probs.index(max(probs))
pred = np.argmax( np.array( probs ) )
if pred == 0:
resp = 'Animated Nsfw'
elif pred == 1:
                    resp = 'Contains Nudity'
elif pred == 2:
resp = 'Contains Porn'
elif pred == 4:
                    resp = 'Contains semi Nudity'
else :
resp = 'Safe For viewing'
return format_response(resp, 200)
except:
return format_response('Ouch! Something went wrong with loading json data from endpoint'+response['Body'].read() , 200)
except :
return format_response('Ouch! Something went wrong with endpoint' , 200)
except :
return format_response('Ouch! Something went wrong with decoding' , 200)
| 32.131148 | 147 | 0.514286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.260204 |
6f08e7a44962b3d4ce1d67b7f28da022e46eb7fe | 4,097 | py | Python | src/bindings/python/tests/test_ngraph/test_eye.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | ["Apache-2.0"] | 2 | 2021-12-14T15:27:46.000Z | 2021-12-14T15:34:16.000Z | src/bindings/python/tests/test_ngraph/test_eye.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | ["Apache-2.0"] | 33 | 2021-09-23T04:14:30.000Z | 2022-01-24T13:21:32.000Z | src/bindings/python/tests/test_ngraph/test_eye.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | ["Apache-2.0"] | 11 | 2021-11-09T00:51:40.000Z | 2021-11-10T12:04:16.000Z |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov
import numpy as np
import pytest
from tests.runtime import get_runtime
from openvino.runtime.utils.types import get_element_type_str
from openvino.runtime.utils.types import get_element_type
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, out_type",
[
pytest.param(2, 5, 0, np.float32),
pytest.param(5, 3, 2, np.int64),
pytest.param(3, 3, -1, np.float16),
pytest.param(5, 5, -10, np.float32),
],
)
def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
output_type=get_element_type_str(out_type))
    # Create with default order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type))
expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, batch_shape, out_type",
[
pytest.param(2, 5, 0, [1], np.float32),
pytest.param(5, 3, 2, [2, 2], np.int64),
pytest.param(3, 3, -1, [1, 3, 2], np.float16),
pytest.param(5, 5, -10, [1, 1], np.float32),
],
)
def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
batch_shape_array = np.array(batch_shape, np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
batch_shape_tensor = ov.constant(batch_shape_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
batch_shape=batch_shape_tensor,
output_type=get_element_type_str(out_type))
    # Create with default order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type),
batch_shape_tensor)
output_shape = [*batch_shape, 1, 1]
one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
expected_results = np.tile(one_matrix, output_shape)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
| 39.776699 | 90 | 0.686112 | 0 | 0 | 0 | 0 | 3,779 | 0.922382 | 0 | 0 | 701 | 0.171101 |
6f09c66c2c39712c9d1518ff1035780b17e4b03c | 2,371 | py | Python | tests/error/test_format_error.py | GDGSNF/graphql-core | 35aa9b261c850aa5f0c335c2405956fd41ed5ca2 | ["MIT"] | 590 | 2015-10-06T18:22:49.000Z | 2022-03-22T16:32:17.000Z | tests/error/test_format_error.py | vpetrovykh/graphql-core | 7af97e22afb27861fc1b7d7ca0292095f8427ecb | ["MIT"] | 300 | 2015-10-06T18:58:11.000Z | 2022-03-22T14:01:44.000Z | tests/error/test_format_error.py | vpetrovykh/graphql-core | 7af97e22afb27861fc1b7d7ca0292095f8427ecb | ["MIT"] | 270 | 2015-10-08T19:47:38.000Z | 2022-03-10T04:17:51.000Z |
from typing import List, Union
from pytest import raises
from graphql.error import GraphQLError, format_error
from graphql.language import Node, Source
from graphql.pyutils import Undefined
def describe_format_error():
def formats_graphql_error():
source = Source(
"""
query {
something
}"""
)
path: List[Union[int, str]] = ["one", 2]
extensions = {"ext": None}
error = GraphQLError(
"test message",
Node(),
source,
[14, 40],
path,
ValueError("original"),
extensions=extensions,
)
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {
"message": "test message",
"locations": [{"line": 2, "column": 14}, {"line": 3, "column": 20}],
"path": path,
"extensions": extensions,
}
def uses_default_message():
# noinspection PyTypeChecker
formatted = format_error(GraphQLError(None)) # type: ignore
assert formatted == {
"message": "An unknown error occurred.",
"locations": None,
"path": None,
}
def includes_path():
path: List[Union[int, str]] = ["path", 3, "to", "field"]
error = GraphQLError("msg", path=path)
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {"message": "msg", "locations": None, "path": path}
def includes_extension_fields():
error = GraphQLError("msg", extensions={"foo": "bar"})
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {
"message": "msg",
"locations": None,
"path": None,
"extensions": {"foo": "bar"},
}
def rejects_none_and_undefined_errors():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
format_error(None) # type: ignore
assert str(exc_info.value) == "Expected a GraphQLError."
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
format_error(Undefined) # type: ignore
assert str(exc_info.value) == "Expected a GraphQLError."
| 31.197368 | 80 | 0.554197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.223956 |
6f0a8a484c64fa9bfcfccccb0a0f15f2d119765a | 6,708 | py | Python | pymonad/test/test_Maybe.py | bjd2385/pymonad | baec7a540d9195b2da029d1a101edd7c385f94bb | ["BSD-3-Clause"] | null | null | null | pymonad/test/test_Maybe.py | bjd2385/pymonad | baec7a540d9195b2da029d1a101edd7c385f94bb | ["BSD-3-Clause"] | null | null | null | pymonad/test/test_Maybe.py | bjd2385/pymonad | baec7a540d9195b2da029d1a101edd7c385f94bb | ["BSD-3-Clause"] | null | null | null |
# --------------------------------------------------------
# (c) Copyright 2014 by Jason DeLaat.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing
from pymonad.Reader import curry
from pymonad.test.MonadTester import *
from pymonad.test.MonoidTester import *
class TestJustFunctor(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustFunctor, self).__init__(x)
self.setClassUnderTest(Just)
def testFunctorLaws(self):
self.given(8)
self.ensure_first_functor_law_holds()
self.ensure_second_functor_law_holds()
class TestNothingFunctor(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingFunctor, self).__init__(x)
self.setClassUnderTest(_Nothing)
def testFunctorLaws(self):
self.given(None)
self.ensure_first_functor_law_holds()
self.ensure_second_functor_law_holds()
class TestJustApplicative(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustApplicative, self).__init__(x)
self.setClassUnderTest(Just)
def testApplicativeLaws(self):
self.given(8)
self.ensure_first_applicative_law_holds()
self.ensure_second_applicative_law_holds()
self.ensure_third_applicative_law_holds()
self.ensure_fourth_applicative_law_holds()
self.ensure_fifth_applicative_law_holds()
class TestNothingApplicative(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingApplicative, self).__init__(x)
self.setClassUnderTest(_Nothing)
def testApplicativeLaws(self):
self.given(None)
self.ensure_first_applicative_law_holds()
self.ensure_second_applicative_law_holds()
self.ensure_third_applicative_law_holds()
self.ensure_fourth_applicative_law_holds()
self.ensure_fifth_applicative_law_holds()
class TestJustMonad(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestJustMonad, self).__init__(x)
self.setClassUnderTest(Just)
def monad_function_f(self, x):
return Just(x + 10)
def monad_function_g(self, x):
return Just(x * 5)
def testMonadLaws(self):
self.given(8)
self.ensure_first_monad_law_holds()
self.ensure_second_monad_law_holds()
self.ensure_third_monad_law_holds()
class TestNothingMonad(unittest.TestCase, MonadTester):
def __init__(self, x):
super(TestNothingMonad, self).__init__(x)
self.setClassUnderTest(_Nothing)
def monad_function_f(self, x):
return Just(x + 10)
def monad_function_g(self, x):
return Just(x * 5)
def testMonadLaws(self):
self.given(None)
self.ensure_first_monad_law_holds()
self.ensure_second_monad_law_holds()
self.ensure_third_monad_law_holds()
class TestMaybeEquality(unittest.TestCase, MonadTester):
def testEqualityOfIdenticalTypes(self):
self.givenMonads(Just(8), Just(8))
self.ensureMonadsAreEqual()
def testInequalityOfIdenticalTypes(self):
self.givenMonads(Just(8), Just(9))
self.ensureMonadsAreNotEqual()
def testInequalityOfJustAndNothing(self):
self.givenMonads(Just(8), Nothing)
self.ensureMonadsAreNotEqual()
def testMonadComparisonExceptionWithJust(self):
self.givenMonads(Just(8), Reader(8))
self.ensureComparisonRaisesException()
def testMonadComparisonExceptionWithNothing(self):
self.givenMonads(Nothing, Reader(8))
self.ensureComparisonRaisesException()
class TestMaybeMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(Maybe)
self.get_mzero()
self.ensure_mzero_is(Nothing)
def test_right_identity(self):
self.givenMonoid(Just(9))
self.ensure_monoid_plus_zero_equals(Just(9))
def test_left_identity(self):
self.givenMonoid(Just(9))
self.ensure_zero_plus_monoid_equals(Just(9))
def test_associativity(self):
self.givenMonoids(Just(1), Just(2), Just(3))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(Just(1), Just(2))
self.ensure_mconcat_equals(Just(3))
def test_mplus_with_one_just_and_one_nothing(self):
self.givenMonoids(Just(1), Nothing)
self.ensure_mconcat_equals(Just(1))
class TestFirstMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(First)
self.get_mzero()
self.ensure_mzero_is(First(Nothing))
def test_right_identity(self):
self.givenMonoid(First(Just(9)))
self.ensure_monoid_plus_zero_equals(First(Just(9)))
def test_left_identity(self):
self.givenMonoid(First(Just(9)))
self.ensure_zero_plus_monoid_equals(First(Just(9)))
def test_associativity(self):
self.givenMonoids(First(Just(1)), First(Just(2)), First(Just(3)))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(First(Just(1)), First(Just(2)))
self.ensure_mconcat_equals(First(Just(1)))
def test_mplus_with_just_and_nothing(self):
self.givenMonoids(First(Just(1)), Nothing)
self.ensure_mconcat_equals(First(Just(1)))
def test_mplus_with_nothing_and_just(self):
self.givenMonoids(Nothing, First(Just(1)))
self.ensure_mconcat_equals(First(Just(1)))
class TestLastMonoid(unittest.TestCase, MonoidTester):
def test_mzero(self):
self.givenMonoid(Last)
self.get_mzero()
self.ensure_mzero_is(Last(Nothing))
def test_right_identity(self):
self.givenMonoid(Last(Just(9)))
self.ensure_monoid_plus_zero_equals(Last(Just(9)))
def test_left_identity(self):
self.givenMonoid(Last(Just(9)))
self.ensure_zero_plus_monoid_equals(Last(Just(9)))
def test_associativity(self):
self.givenMonoids(Last(Just(1)), Last(Just(2)), Last(Just(3)))
self.ensure_associativity()
def test_mplus_with_two_just_values(self):
self.givenMonoids(Last(Just(1)), Last(Just(2)))
self.ensure_mconcat_equals(Last(Just(2)))
def test_mplus_with_just_and_nothing(self):
self.givenMonoids(Last(Just(1)), Nothing)
self.ensure_mconcat_equals(Last(Just(1)))
def test_mplus_with_nothing_and_just(self):
self.givenMonoids(Nothing, Last(Just(1)))
self.ensure_mconcat_equals(Last(Just(1)))
if __name__ == "__main__":
unittest.main()
| 33.373134 | 73 | 0.690966 | 6,246 | 0.931127 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.029964 |
6f0b8327462eef4971df182fcdc4e7e99669fd00 | 210 | py | Python | sborl/__init__.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | ["Apache-2.0"] | null | null | null | sborl/__init__.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | ["Apache-2.0"] | null | null | null | sborl/__init__.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | ["Apache-2.0"] | null | null | null |
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
__version__ = "0.0.8"
# flake8: noqa: F401,F402
from . import errors, events, relation, testing
from .relation import EndpointWrapper
| 23.333333 | 47 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.495238 |
6f0bb8acf71ebb128d83c12c5909aa37ad5afe8a | 940 | py | Python | sizer.py | riffcc/librarian | f3cf8f4cc9f9a717e5f807a1d8558eb8c4e4d528 | ["MIT"] | null | null | null | sizer.py | riffcc/librarian | f3cf8f4cc9f9a717e5f807a1d8558eb8c4e4d528 | ["MIT"] | null | null | null | sizer.py | riffcc/librarian | f3cf8f4cc9f9a717e5f807a1d8558eb8c4e4d528 | ["MIT"] | null | null | null |
#!/usr/bin/python3
# Fetch torrent sizes
# TODO: Report number of files before we go etc
import os
from torrentool.api import Torrent
from fnmatch import fnmatch
root = '/opt/radio/collections'
pattern = "*.torrent"
alltorrentsize = 0
print("Thanks for using The Librarian.")
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch(name, pattern):
torrentstats = Torrent.from_file(os.path.join(path, name))
alltorrentsize += torrentstats.total_size
print('Torrent size ' + str(torrentstats.total_size) + ' for a total so far of ' + str(alltorrentsize))
print('DEBUG' + os.path.join(path, name))
# Reading filesize
my_torrent = Torrent.from_file('/opt/radio/collections/arienscompanymanuals/archive.org/download/collection_01_ariens_manuals/collection_01_ariens_manuals_archive.torrent')
size = my_torrent.total_size # Total files size in bytes.
print(size)
| 34.814815 | 172 | 0.726596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.411702 |
6f0bf095397f81c3ceab712d5eed93ca0139a752 | 1,319 | py | Python | i_vis/core/login.py | piechottam/i-vis-core | 0b90300d1ae8b96d28a80802c1300dd861ad6f4e | ["MIT"] | null | null | null | i_vis/core/login.py | piechottam/i-vis-core | 0b90300d1ae8b96d28a80802c1300dd861ad6f4e | ["MIT"] | null | null | null | i_vis/core/login.py | piechottam/i-vis-core | 0b90300d1ae8b96d28a80802c1300dd861ad6f4e | ["MIT"] | null | null | null |
""" Flask LoginManager plugin.
Import and execute ``login.init_app(app)`` in a factory function to use.
"""
from typing import Any, Callable, TYPE_CHECKING
from functools import wraps
from flask import redirect, request, url_for, current_app
from flask_login import current_user
from flask_login.login_manager import LoginManager
from .errors import IllegalAccessError
if TYPE_CHECKING:
from werkzeug.wrappers import Response
login = LoginManager()
def admin_required(func: Callable) -> Callable:
"""Make view only accessible to admins.
Args:
        func: Callable to wrap.
Returns:
Wrapped callable - only callable when user is an admin.
"""
@wraps(func)
def decorated_view(*args: Any, **kwargs: Any) -> Any:
if not current_app.config.get("LOGIN_DISABLED", True) and (
current_user is None
or not current_user.is_authenticated
or not current_user.is_admin
):
# TODO
# move flash_permission_denied()
# move return redirect(url_for("main.index"))
raise IllegalAccessError
return func(*args, **kwargs)
return decorated_view
@login.unauthorized_handler
def unauthorized_callback() -> "Response":
return redirect(url_for("main.signin", next=request.path))
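# --- Illustrative wiring sketch (added for clarity; `_example_create_app` is a hypothetical
# factory name, not part of this module). As the module docstring says, call
# `login.init_app(app)` from an application factory:
def _example_create_app():
    from flask import Flask  # local import keeps the sketch self-contained
    app = Flask(__name__)
    login.init_app(app)  # register the LoginManager defined above on the app
    return app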
| 25.862745 | 72 | 0.686884 | 0 | 0 | 0 | 0 | 601 | 0.455648 | 0 | 0 | 398 | 0.301744 |
6f0cc8d81107fd93a3ad95d929b3e7cadc42e6cc | 10,078 | py | Python | code/App.py | KasinSparks/Arduino_RGB_Lights | 9c924ef3c7df2c7725c2178b42eb0f784168160c | ["MIT"] | null | null | null | code/App.py | KasinSparks/Arduino_RGB_Lights | 9c924ef3c7df2c7725c2178b42eb0f784168160c | ["MIT"] | null | null | null | code/App.py | KasinSparks/Arduino_RGB_Lights | 9c924ef3c7df2c7725c2178b42eb0f784168160c | ["MIT"] | null | null | null |
from tkinter import *
from ModeEnum import Mode
import SerialHelper
import Views.StaticView
import Views.CustomWidgets.Silder
from ColorEnum import Color
from functools import partial
from Views.CommandPanel import CommandPanel
from Views.ListItem import ListItem
from ProcessControl import ProcessManager, ProcessCommandEnum
import os, signal
menuBackgroundColor = "#262e30"
menuForegroundColor = "#e5e4c5"
menuActiveForegroundColor = menuForegroundColor
menuActiveBackgroundColor = "#464743"
mainBackgroundColor = "#1b2122"
class App(Frame):
def __init__(self,master=None):
Frame.__init__(self,master)
self.mode = Mode.Static
self.ser = SerialHelper.SerialHelper()
self.test = Views.StaticView.StaticView(self)
self.sliderRed = Views.CustomWidgets.Silder.Silder(self, "Red", color=Color.RED)
self.sliderGreen = Views.CustomWidgets.Silder.Silder(self, "Green", color=Color.GREEN)
self.sliderBlue = Views.CustomWidgets.Silder.Silder(self, "Blue", color=Color.BLUE)
self.grid()
self.createWidgets()
# Restart the RGB controller
#f = open("../config/processctl", "w")
#f.write("controller.py,start")
#f.close()
##ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.START)
def createWidgets(self):
self.cPanel = CommandPanel()
self.quitButton= Button(self, text="Quit", command=self.quit)
self.quitButton.grid()
self.my_label = Label(self, text="My Label!")
self.my_label.grid()
self.connectedLabel = Label(self, text="Not Connected", foreground='red')
self.connectedLabel.grid()
self.test.grid()
self.tempText = Label(self, text="NONE")
self.tempText.grid()
self.addButton = Button(self, text="Add", command=self.addValues)
self.addButton.grid()
# TODO: change the value to reflect the item selected index
#self.addButton = Button(self, text="Add After Selected", command=partial(self.addValues, self.cPanel.getListItemIndex(self.cPanel._selectedItem)))
# Hacky way of doing this... listItem could be done better
self.addButton = Button(self,
text="Add After Selected",
command=partial(self.addValues, listItem='Not None'))
self.addButton.grid()
# TODO: Add at a random position
self.addButton = Button(self, text="Add At A Random Position", command=partial(self.addValues, random=True))
self.addButton.grid()
# test
self.sliderRed.grid(column=0, row=0)
self.sliderGreen.grid(column=1, row=0)
self.sliderBlue.grid(column=2, row=0)
self.delayAreaFrame = Frame(self)
self.delayAreaFrame.grid(column=3, row=0)
self.fadeValLabel = Label(self.delayAreaFrame, text="Fade Value:")
self.fadeValLabel.grid(column=0, row=0)
self.fadeVal = Entry(self.delayAreaFrame)
self.fadeVal.grid(column=0, row=1)
self.delayValLabel = Label(self.delayAreaFrame, text="Delay Value:")
self.delayValLabel.grid(column=0, row=3)
self.delayVal = Entry(self.delayAreaFrame)
self.delayVal.grid(column=0, row=4)
self.addDelayButton = Button(self.delayAreaFrame, text="Add Delay Value", command=self.addDelayValue)
self.addDelayButton.grid(column=1, row=3, rowspan=2)
self.cPanel.grid(column=4,row = 0)
#self.cPanel.insert(END, ListItem(self.cPanel, "Insert Test 1"))
self.my_menu = Menu(self,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
#self.fileMenu = Menu(self.my_menu)
#self.fileMenu.add_command(label="Exit", command=self.quit)
self.my_menu.add_cascade(label="File", menu=self.fileMenu(self.my_menu))
self.my_menu.add_cascade(label="Ports", menu=self.portsMenu(self.my_menu))
self.my_menu.add_cascade(label="Mode", menu=self.modeMenu(self.my_menu))
def fileMenu(self, mainMenu):
fileMenu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
fileMenu.add_command(label="Exit", command=self.quit)
return fileMenu
def portsMenu(self, mainMenu):
portsMenu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
for sp in SerialHelper.getSerialPorts():
# Have this be a call to the function and supply the serial port as the arg
functionCall = partial(self.selectPort, sp[0], self.connectedLabel)
portsMenu.add_command(label=sp, command=functionCall)
return portsMenu
def selectPort(self, port, uiElement):
color = 'red'
text = 'Failed'
if self.ser.connect(port):
text = 'Connected on ' + port
color = 'green'
f = open("config/port", "w")
f.write(port)
f.close()
# Restart the RGB controller
##f = open("../config/processctl", "w")
##f.write("controller.py,restart")
##f.close()
ProcessManager.sendCommand("controller.py", ProcessCommandEnum.ProcessCommandEnum.RESTART)
uiElement['foreground'] = color
uiElement['text'] = text
def modeMenu(self, mainMenu):
menu = Menu(mainMenu,
tearoff=0,
activebackground=menuActiveBackgroundColor,
background=menuBackgroundColor,
activeforeground=menuActiveForegroundColor,
foreground=menuForegroundColor
)
for m in Mode:
funcCall = partial(self.changeMode, m)
menu.add_command(label=m, command=funcCall)
return menu
def changeMode(self, mode):
print("Mode changed from: "+ (str) (self.mode) + " to: " + (str) (mode))
self.mode = mode
loopingCondition = os.path.join(os.getcwd(), 'config', 'loopingCondition')
f = open(loopingCondition, 'w')
message = "LOOPING: "
if self.mode == Mode.Dynamic:
message += "TRUE;"
elif self.mode == Mode.Static:
message += "FALSE;"
f.write(message)
f.close()
def parseFadeValue(self):
fadeValStr = self.fadeVal.get()
try:
value = int(fadeValStr)
if value < 1 or value > 255:
print("Delay value out of byte range")
return 1
except ValueError as err:
print(err)
return 1
return value
def addValues(self, listItem=None, index=-1, random=False):
if index is None:
print("Index was None... Values not added.")
return
elif listItem is not None:
if self.cPanel._selectedItem is None:
print("No selected object... Value was not added.")
return
index = self.cPanel.getListItemIndex(self.cPanel._selectedItem) + 1
elif random:
index = self.cPanel.getRandomIndex()
tempString = self.paddNum(self.sliderRed.getValue()) + ',' + self.paddNum(self.sliderGreen.getValue()) + ',' + self.paddNum(self.sliderBlue.getValue()) + ',' + self.paddNum(self.parseFadeValue()) + ';'
self.tempText['text'] = tempString
#self.writeToFile(file="../config/command", text=tempString + '\n')
self.cPanel.addItem(tempString, index)
def addDelayValue(self):
# Check range of value
delayValStr = self.delayVal.get()
try:
value = int(delayValStr)
if value < 1 or value > 255:
print("Delay value out of byte range")
return -1
except ValueError as err:
print(err)
return -1
delayValStr = "DELAY: " + delayValStr
self.cPanel.addItem(delayValStr)
def paddNum(self, num=0):
if num > 255:
print("Fade number > 255. Defaulting to 000")
return "000"
paddedZeros = ""
# Generate the correct number of padding zeros
if num < 100:
paddedZeros += '0'
if num < 10:
paddedZeros += '0'
# Pad the number
paddedZeros += str(num)
return paddedZeros
def writeToFile(self, file=None, fileArgs='a', text=None):
if file is None:
print("No file to write to...")
return
f = open(file, fileArgs)
f.write(text)
#from SerialHelper import getSerialPorts
#for sp in getSerialPorts():
# print(sp)
# Start the app up!
app = App()
app.master.title("RGB Lights 3000")
app.master.config(menu=app.my_menu, background=mainBackgroundColor)
#subprocess.call(["./controller.py", "/dev/ttyUSB0"])
# Start up the app and the process manager
pid = os.fork()
if pid:
# parent
app.mainloop()
os.kill(pid, signal.SIGTERM)
else:
# child
exec(open("./code/ProcessControl/ProcessManager.py").read())
#os.execlp("python3", "python3", "./ProcessControl/ProcessManager.py")
#os.system("controller.py")
#app.mainloop()
#print("here") | 32.509677 | 209 | 0.594066 | 8,899 | 0.883013 | 0 | 0 | 0 | 0 | 0 | 0 | 2,033 | 0.201727 |
6f0d7bbee7a9caaa60cc0549c015512769c48c45 | 4,944 | py | Python | tests/io/product/test_sidd_writing.py | ngageoint/SarPy | a21ebfe136833e3d25cac4e5ebfd534f28538db4 | ["MIT"] | null | null | null | tests/io/product/test_sidd_writing.py | ngageoint/SarPy | a21ebfe136833e3d25cac4e5ebfd534f28538db4 | ["MIT"] | null | null | null | tests/io/product/test_sidd_writing.py | ngageoint/SarPy | a21ebfe136833e3d25cac4e5ebfd534f28538db4 | ["MIT"] | null | null | null |
import os
import json
import tempfile
import shutil
import unittest
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.product.sidd import SIDDReader
from sarpy.io.product.sidd_schema import get_schema_path
from sarpy.processing.sidd.sidd_product_creation import create_detected_image_sidd, create_dynamic_image_sidd, create_csi_sidd
from sarpy.processing.ortho_rectify import NearestNeighborMethod
from tests import parse_file_entry
try:
from lxml import etree
except ImportError:
etree = None
product_file_types = {}
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'product_file_types.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
for the_type in the_files:
valid_entries = []
for entry in the_files[the_type]:
the_file = parse_file_entry(entry)
if the_file is not None:
valid_entries.append(the_file)
product_file_types[the_type] = valid_entries
sicd_files = product_file_types.get('SICD', [])
def check_versus_schema(input_nitf, the_schema):
reader = SIDDReader(input_nitf)
sidd_bytes = reader.nitf_details.get_des_bytes(0)
xml_doc = etree.fromstring(sidd_bytes)
xml_schema = etree.XMLSchema(file=the_schema)
return xml_schema.validate(xml_doc)
class TestSIDDWriting(unittest.TestCase):
@unittest.skipIf(len(sicd_files) == 0, 'No sicd files found')
def test_sidd_creation(self):
for fil in sicd_files:
reader = SICDReader(fil)
ortho_helper = NearestNeighborMethod(reader)
# create a temp directory
temp_directory = tempfile.mkdtemp()
sidd_files = []
# create a basic sidd detected image
with self.subTest(msg='Create version 1 detected image for file {}'.format(fil)):
create_detected_image_sidd(
ortho_helper, temp_directory, output_file='di_1.nitf', version=1)
sidd_files.append('di_1.nitf')
with self.subTest(msg='Create version 2 detected image for file {}'.format(fil)):
create_detected_image_sidd(
ortho_helper, temp_directory, output_file='di_2.nitf', version=2)
sidd_files.append('di_2.nitf')
# create a csi image
with self.subTest(msg='Create version 1 csi for file {}'.format(fil)):
create_csi_sidd(
ortho_helper, temp_directory, output_file='csi_1.nitf', version=1)
sidd_files.append('csi_1.nitf')
with self.subTest(msg='Create version 2 csi for file {}'.format(fil)):
create_csi_sidd(
ortho_helper, temp_directory, output_file='csi_2.nitf', version=2)
sidd_files.append('csi_2.nitf')
# create a dynamic image
with self.subTest(msg='Create version 1 subaperture stack for file {}'.format(fil)):
create_dynamic_image_sidd(
ortho_helper, temp_directory, output_file='sast_1.nitf', version=1, frame_count=3)
sidd_files.append('sast_1.nitf')
with self.subTest(msg='Create version 2 subaperture stack for file {}'.format(fil)):
create_dynamic_image_sidd(
ortho_helper, temp_directory, output_file='sast_2.nitf', version=2, frame_count=3)
sidd_files.append('sast_2.nitf')
# check that each sidd structure serialized according to the schema
if etree is not None:
for vers in [1, 2]:
schema = get_schema_path('urn:SIDD:{}.0.0'.format(vers))
the_fil = 'di_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'Detected image version {} structure not valid versus schema {}'.format(vers, schema))
the_fil = 'csi_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'csi version {} structure not valid versus schema {}'.format(vers, schema))
the_fil = 'sast_{}.nitf'.format(vers)
if the_fil in sidd_files:
self.assertTrue(
check_versus_schema(os.path.join(temp_directory, the_fil), schema),
'Dynamic image version {} structure not valid versus schema {}'.format(vers, schema))
# clean up the temporary directory
shutil.rmtree(temp_directory)
| 44.142857 | 126 | 0.619539 | 3,505 | 0.70894 | 0 | 0 | 3,458 | 0.699434 | 0 | 0 | 921 | 0.186286 |
6f0f9bbc343ebc2f491e5e0fa189894eb08c5ad7 | 28,213 | py | Python | src/westpa/tools/wipi.py | burntyellow/adelman_ci | cca251a51b34843faed0275cce01d7a307829993 | ["MIT"] | null | null | null | src/westpa/tools/wipi.py | burntyellow/adelman_ci | cca251a51b34843faed0275cce01d7a307829993 | ["MIT"] | null | null | null | src/westpa/tools/wipi.py | burntyellow/adelman_ci | cca251a51b34843faed0275cce01d7a307829993 | ["MIT"] | null | null | null |
import numpy as np
import scipy.sparse as sp
from westpa.tools import Plotter
# A useful dataclass used as a wrapper for w_ipa to facilitate
# ease-of-use in ipython/jupyter notebooks/sessions.
# It basically just wraps up numpy arrays and dicts.
class WIPIDataset(object):
def __init__(self, raw, key):
self.__dict__ = {}
self.raw = raw
self.name = key
def __repr__(self):
if isinstance(self.__dict__['raw'], dict):
return repr(self.__dir__())
else:
return repr(self.raw)
def __getitem__(self, value):
if not isinstance(value, str):
return self.__dict__['raw'][value]
if value in list(self.__dict__['raw'].keys()):
return self.__dict__['raw'][value]
elif value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getattr__(self, value):
# Check if it's an attribute of the underlying datatype.
# If not, just use the getitem function.
if value in dir(self.__dict__['raw']):
return getattr(self.__dict__['raw'], value)
else:
return self.__getitem__(value)
def __setattr__(self, key, value):
self.__dict__[key] = value
def __dir__(self):
dict_keys = list(self.__dict__.keys())
remove = ['raw', 'name', '__dict__', 'plotter']
for i in remove:
try:
dict_keys.remove(str(i))
except:
pass
# We don't enforce that this is a dictionary.
if isinstance(self.__dict__['raw'], dict):
return sorted(set(list(self.raw.keys()) + dict_keys))
else:
return sorted(set(dict_keys))
def keys(self):
print(self.__dir__())
# We want to override the basic math functions, now, so... this is only valid for numpy sets.
def __add__(self, other):
return self.__dict__['raw'] + other
def __radd__(self, other):
return other + self.__dict__['raw']
def __sub__(self, other):
return self.__dict__['raw'] - other
def __rsub__(self, other):
return other - self.__dict__['raw']
def __mul__(self, other):
return self.__dict__['raw'] * other
def __rmul__(self, other):
return other * self.__dict__['raw']
def __div__(self, other):
return self.__dict__['raw'] / other
def __floordiv__(self, other):
return self.__dict__['raw'] // other
def __rdiv__(self, other):
return other / self.__dict__['raw']
def __mod__(self, other):
return self.__dict__['raw'] % other
def __pow__(self, other):
return self.__dict__['raw'] ** other
def __lshift__(self, other):
return self.__dict__['raw'] << other
def __rshift__(self, other):
return self.__dict__['raw'] >> other
def __and__(self, other):
return self.__dict__['raw'] & other
def __eq__(self, other):
return self.__dict__['raw'] == other
def __ne__(self, other):
return self.__dict__['raw'] != other
def __lt__(self, other):
return self.__dict__['raw'] < other
def __gt__(self, other):
return self.__dict__['raw'] > other
def __le__(self, other):
return self.__dict__['raw'] <= other
def __ge__(self, other):
return self.__dict__['raw'] >= other
def __xor__(self, other):
return self.__dict__['raw'] ^ other
def __or__(self, other):
return self.__dict__['raw'] | other
#def __iadd__(self, other):
# return self.__dict__['raw'] += other
#def __isub__(self, other):
# return self.__dict__['raw'] -= other
#def __imul__(self, other):
# return self.__dict__['raw'] *= other
#def __idiv__(self, other):
# return self.__dict__['raw'] /= other
#def __ifloordiv__(self, other):
# return self.__dict__['raw'] //= other
#def __imod__(self, other):
# return self.__dict__['raw'] %= other
#def __ipow__(self, other):
# return self.__dict__['raw'] **= other
#def __ilshift__(self, other):
# return self.__dict__['raw'] <<= other
#def __irshift__(self, other):
# return self.__dict__['raw'] >>= other
#def __iand__(self, other):
# return self.__dict__['raw'] &= other
#def __ixor__(self, other):
# return self.__dict__['raw'] ^= other
#def __ior__(self, other):
# return self.__dict__['raw'] |= other
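# Illustrative sketch (added for clarity; demonstration only): WIPIDataset forwards both
# item access and arithmetic to the wrapped object, so it behaves like the raw data.
if __name__ == "__main__":  # pragma: no cover
    _demo = WIPIDataset(raw=np.arange(4), key='demo')
    print(_demo + 1)  # arithmetic is delegated to the wrapped numpy array
    print(WIPIDataset(raw={'weights': [0.5, 0.5]}, key='demo')['weights'])  # dict-style access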
# Similar to the above, but slightly expanded to contain information from analysis files.
class KineticsIteration(object):
def __init__(self, kin_h5file, index, assign, iteration=-1):
self.__dict__ = {}
self.h5file = kin_h5file
# Keys:
_2D_h5keys = [ 'conditional_flux_evolution', 'rate_evolution' ]
_1D_h5keys = [ 'state_pop_evolution', 'color_prob_evolution', 'target_flux_evolution' ]
for key in _2D_h5keys:
try:
self.__dict__[key] = self.__2D_with_error__(key, index, assign)
except:
self.__dict__[key] = None
for key in _1D_h5keys:
try:
self.__dict__[key] = self.__1D_with_error__(key, index, assign)
except:
self.__dict__[key] = None
try:
self.__dict__['total_fluxes'] = WIPIDataset(raw=np.array(self.h5file['total_fluxes']), key='total_fluxes')
# We'll have to update this to make things better...
#self.__dict__['total_fluxes'].plotter = Plotter(self.h5file['total_fluxes'][...], 'Total Fluxes', iteration=iteration, interface='text')
#self.__dict__['total_fluxes'].plot = self.__dict__['total_fluxes'].plotter.plot
except:
pass
def __repr__(self):
return repr(self.__dir__())
def __getitem__(self, value):
if value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getattr__(self, value):
if value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setattr__(self, key, value):
self.__dict__[key] = value
def __dir__(self):
dict_keys = list(self.__dict__.keys())
# We don't want to show the plotter class; just the plot function
remove = [ 'h5file', '__dict__']
for i in remove:
try:
dict_keys.remove(str(i))
except:
pass
return sorted(set(dict_keys))
def keys(self):
print(self.__dir__())
# We seriously need to rename this.
# It's similar to the global WIPDataset, but has some nice pretty print functions.
class __custom_dataset__(object):
# This is just allow it to be indexed via properties.
# Not a huge thing, but whatever.
def __init__(self, raw, assign, key):
self.__dict__ = {}
self.raw = raw
self.name = key
self.assign = assign
self.nstates = assign.attrs['nstates']
self.dim = len(raw.shape)
def __repr__(self):
return repr(self.__dir__())
def __getitem__(self, value):
if value in self.__dict__['raw'].dtype.names:
return self.__dict__['raw'][value]
elif value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getattr__(self, value):
if value in self.__dict__['raw'].dtype.names:
return self.__dict__['raw'][value]
elif value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setattr__(self, key, value):
self.__dict__[key] = value
def __dir__(self):
dict_keys = list(self.__dict__.keys())
# We don't want to show the plotter class; just the plot function
remove = ['assign', 'dim', 'nstates', 'plotter', '__dict__']
for i in remove:
try:
dict_keys.remove(str(i))
except:
pass
return sorted(set(list(self.raw.dtype.names) + dict_keys))
def keys(self):
print(self.__dir__())
def _repr_pretty_(self, p, cycle):
if self.dim == 1:
return self._1D_repr_pretty_(p, cycle)
if self.dim == 2:
return self._2D_repr_pretty_(p, cycle)
def _1D_repr_pretty_(self, p, cycle):
# We're just using this as a way to print things in a pretty way. They can still be indexed appropriately.
# Stolen shamelessly from westtools/kinetics_tool.py
maxlabellen = max(list(map(len,self.assign['state_labels'])))
p.text('')
p.text('{name} data:\n'.format(name=self.name))
for istate in range(self.nstates):
p.text('{:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n'
.format(self.assign['state_labels'][istate],
self.raw['expected'][istate],
self.raw['ci_lbound'][istate],
self.raw['ci_ubound'][istate],
maxlabellen=maxlabellen))
p.text('To access data, index via the following names:\n')
p.text(str(self.__dir__()))
return " "
def _2D_repr_pretty_(self, p, cycle):
# We're just using this as a way to print things in a pretty way. They can still be indexed appropriately.
# Stolen shamelessly from westtools/kinetics_tool.py
maxlabellen = max(list(map(len,self.assign['state_labels'])))
p.text('')
p.text('{name} data:\n'.format(name=self.name))
for istate in range(self.nstates):
for jstate in range(self.nstates):
if istate == jstate: continue
p.text('{:{maxlabellen}s} -> {:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1\n'
.format(self.assign['state_labels'][istate], self.assign['state_labels'][jstate],
self.raw['expected'][istate, jstate],
self.raw['ci_lbound'][istate, jstate],
self.raw['ci_ubound'][istate, jstate],
maxlabellen=maxlabellen))
p.text('To access data, index via the following names:\n')
p.text(str(self.__dir__()))
return " "
def __2D_with_error__(self, h5key, index, assign):
# Check the start and stop, calculate the block size, and index appropriately.
# While we could try and automatically generate this above, it's a little more consistent to try it here.
# This should show the first block for which the current iteration has contributed data.
self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1,0]
value = ((index-self.h5file.attrs['iter_start']) // self.step_iter)
if value < 0:
value = 0
raw = self.h5file[h5key][value, :, :]
error = (raw['ci_ubound'] - raw['ci_lbound']) / (2*raw['expected'])
expected = raw['expected']
raw = self.__custom_dataset__(raw, assign, h5key)
raw.error = error
raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text')
raw.plot = raw.plotter.plot
return raw
def __1D_with_error__(self, h5key, index, assign):
self.step_iter = (self.h5file[h5key]['iter_stop'][0] - self.h5file[h5key]['iter_start'][0])[1]
value = ((index-self.h5file.attrs['iter_start']) // self.step_iter)
if value < 0:
value = 0
raw = self.h5file[h5key][value, :]
error = (raw['ci_ubound'] - raw['ci_lbound']) / (2*raw['expected'])
expected = raw['expected']
raw = self.__custom_dataset__(raw, assign, h5key)
raw.error = error
raw.plotter = Plotter(self.h5file, h5key, iteration=value, interface='text')
raw.plot = raw.plotter.plot
return raw
class __get_data_for_iteration__(object):
'''
All interesting data from an iteration (current/past). Whenever you change the scheme or iteration,
this dictionary is automatically updated. For the current iteration, it's keyed to the current seg_id.
For the past iteration, it's keyed to the seg_id in the CURRENT iteration such that:
w.current[X] & w.past[X]
returns information about seg_id X in the current iteration and information on seg_ID X's PARENT in the
preceding iteration.
Can be indexed via a seg_id, or like a dictionary with the following keys:
kinavg, weights, pcoord, auxdata (optional), parents, summary, seg_id, walkers, states, bins
kinavg, states, and bins refer to the output from w_kinavg and w_assign for this iteration
and analysis scheme. They are NOT dynamics bins, but the bins defined in west.cfg.
Has the following properties:
.minweight, .maxweight
which return all properties of the segment that matches those criteria in the selected iteration.
If you change the analysis scheme, so, too, will the important values.
'''
def __init__(self, parent, value, seg_ids = None):
'''
Initializes and sets the correct data.
'''
# We've classed this so that we can override some of the normal functions and allow indexing via seg_id
self.__dict__ = {}
# Is this function thread safe?
iter_group = parent.data_reader.get_iter_group(value)
#iter_group = parent.west['iterations/iter_{num:08d}'.format(num=value)]
self.parent = parent
current = {}
current['iteration'] = value
if seg_ids is None:
seg_ids = range(0, iter_group['seg_index']['weight'].shape[0])
# Just make these easier to access.
current['weights'] = iter_group['seg_index']['weight'][seg_ids]
current['pcoord'] = iter_group['pcoord'][...][seg_ids, :, :]
try:
current['auxdata'] = {}
for key in list(iter_group['auxdata'].keys()):
current['auxdata'][key] = iter_group['auxdata'][key][...][seg_ids, :]
except:
pass
current['parents'] = iter_group['seg_index']['parent_id'][seg_ids]
current['summary'] = parent.data_reader.data_manager.get_iter_summary(int(value))
current['seg_id'] = np.array(list(range(0, iter_group['seg_index'].shape[0])))[seg_ids]
current['walkers'] = current['summary']['n_particles']
current['states'] = parent.assign['trajlabels'][value-1, :current['walkers'], :][seg_ids]
current['bins'] = parent.assign['assignments'][value-1, :current['walkers'], :][seg_ids]
# Calculates the bin population for this iteration.
nbins = parent.assign['state_map'].shape[0]
# We have to take the 'unknown' state into account
nstates = parent.assign['state_labels'].shape[0] + 1
# Temporarily disabled while I sort out the fact that we shouldn't be using data from w_assign for state populations.
#current['plot'] = Plotter(parent.direct, parent.reweight, parent.iteration, parent.assign['bin_labels'], parent.assign['state_labels'], current['populations'].states, current['populations'].bins, parent.interface)
# Now we'll load up the results of the kinetics analysis.
current['direct'] = KineticsIteration(parent.direct, value, parent.assign, value)
evolution_datasets = [ 'rate_evolution', 'conditional_flux_evolution', 'state_pop_evolution', 'color_prob_evolution' , 'total_fluxes', 'target_flux_evolution']
# We want to load these up as... oh, who knows, I suppose?
try:
current['reweight'] = KineticsIteration(parent.reweight, value, parent.assign, value)
# We'll make this not a sparse matrix...
matrix = parent.reweight['iterations/iter_{:08d}'.format(value)]
# Assume color.
current['instant_matrix'] = sp.coo_matrix((matrix['flux'][...], (matrix['rows'][...], matrix['cols'][...])), shape=((nbins-1)*2, (nbins-1)*2)).todense()
reweighting = True
except:
# This analysis hasn't been enabled, so we'll simply return the default error message.
current['reweight'] = parent.reweight['rate_evolution']
current['instant_matrix'] = parent.reweight['bin_populations']
current['matrix'] = parent.reweight['bin_populations']
reweighting = False
# Check if the analysis has been enabled. If yes, make them specify dataset dictionaries. If not, return the thing.
if reweighting:
for key in evolution_datasets:
current[key] = WIPIDataset(raw={ 'direct': current['direct'][key], 'reweight': current['reweight'][key] }, key='a')
else:
for key in evolution_datasets:
current[key] = WIPIDataset(raw={ 'direct': current['direct'][key] }, key='direct')
self.raw = current
def __repr__(self):
'''
Returns the dictionary containing the iteration's values.
'''
return repr(self.__dir__())
def keys(self):
'''
Returns the keys function of the internal dictionary.
'''
return list(self.__dict__['raw'].keys())
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getattr__(self, value):
if value in list(self.__dict__['raw'].keys()):
return self.__dict__['raw'][value]
elif value in list(self.__dict__.keys()):
return self.__dict__[value]
def __setattr__(self, key, value):
self.__dict__[key] = value
def __dir__(self):
dict_keys = list(self.__dict__.keys())
dict_keys += ['maxweight', 'minweight', 'walkers', 'aggregate_walkers', 'successful_trajectories']
remove = ['__dict__']
for i in remove:
try:
dict_keys.remove(str(i))
except:
pass
return sorted(set(list(self.__dict__['raw'].keys()) + dict_keys))
@property
def maxweight(self):
'''
Returns information about the segment which has the largest weight for this iteration.
'''
# Is there a faster or cleaner way to do this? Ah, maybe.
walker = np.where(self.raw['weights'] == np.max(self.raw['weights']))[0][0]
return self.__getitem__(walker)
@property
def minweight(self):
'''
Returns information about the segment which has the smallest weight for this iteration.
'''
walker = np.where(self.raw['weights'] == np.min(self.raw['weights']))[0][0]
return self.__getitem__(walker)
@property
def successful_trajectories(self):
'''
Returns which trajectories are successful.
'''
#walker = np.where(self.raw['weights'] == np.min(self.raw['weights']))[0][0]
# Find where we have a transition....
state_changes = np.where(self.raw['states'][:,:-1] != self.raw['states'][:,1:])
walkers = state_changes[0]
# The index of the state change.
new_states = state_changes[1] + 1
old_states = state_changes[1]
walker = {}
for z, (i, j) in enumerate(zip(old_states, new_states)):
#if self.raw['states'][walkers[z], i] == istate and self.raw['states'][walkers[z], j] == jstate:
istate = self.raw['states'][walkers[z], i]
jstate = self.raw['states'][walkers[z], j]
#print(z,i,j, istate, jstate)
try:
walker[istate,jstate].append(walkers[z])
except:
walker[istate,jstate] = [walkers[z]]
walker = WIPIDataset(raw=walker, key=None)
return walker
@property
def walkers(self):
'''
The number of walkers active in the current iteration.
'''
# Returns number of walkers for iteration X. Assumes current iteration, but can go with different one.
# Make this just... yeah, put this elsewhere.
return self.parent.west['summary']['n_particles'][self.iteration-1]
@property
def aggregate_walkers(self):
return self.parent.west['summary']['n_particles'][:self.iteration].sum()
def __getitem__(self, value):
'''
Responsible for handling whether this is treated like a dictionary of data sets, or an array of walker data.
'''
# Check to see if we're indexing via any of the active string types. We should probably break it down via string or int, instead of 'what exists and what doesn't', but it works for now.
active_items = ['kinavg', 'statepops', 'weights', 'pcoord', 'auxdata', 'parents', 'summary', 'seg_id', 'walkers', 'states', 'bins', 'populations', 'plot', 'instant_matrix', 'kinrw', 'matrix', 'rwstatepops']
#if value in active_items:
if isinstance(value, str):
# This should handle everything. Otherwise...
try:
return self.raw[value]
except:
print('{} is not a valid data structure.'.format(value))
elif isinstance(value, int) or isinstance(value, np.int64):
# Otherwise, we assume they're trying to index for a seg_id.
if value < self.walkers:
current = {}
current['plotter'] = {}
for i in ['pcoord']:
current[i] = WIPIDataset(raw=self.raw[i][value,:,:], key=i)
current[i].plotter = Plotter(self.raw[i][value,:,:], i, iteration=self.iteration, interface='text')
current[i].plot = current[i].plotter.plot
current['states'] = self.raw['states'][value, :]
current['bins'] = self.raw['bins'][value, :]
current['parents'] = self.raw['parents'][value]
current['seg_id'] = self.raw['seg_id'][value]
current['weights'] = self.raw['weights'][value]
try:
current['auxdata'] = {}
for key in list(self.raw['auxdata'].keys()):
current['auxdata'][key] = self.raw['auxdata'][key][value]
except:
pass
current = WIPIDataset(current, 'Segment {} in Iter {}'.format(value, self.iteration))
return current
else:
print('INVALID SEG_ID {}. SEG_ID should be less than {}.'.format(value, self.walkers))
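# --- Illustrative w_ipa usage (added for clarity; assumes `w` is the interactive analysis
# object that builds these wrappers, as described in the docstring above). ---
#
#     w.current['weights']      # weights of every walker in the current iteration
#     w.current[3]['pcoord']    # progress coordinate of seg_id 3
#     w.past[3]['pcoord']       # pcoord of seg_id 3's parent in the previous iteration
#     w.current.maxweight       # full record of the highest-weight segment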
# This handles the 'schemes', and all assorted data.
class WIPIScheme(object):
def __init__(self, scheme, name, parent, settings):
self.__dict__ = {}
self.raw = scheme
#self.name = parent._schemename
self.__analysis_schemes__ = scheme
self.iteration = parent.iteration
self.__dict__['name'] = None
self.__settings = settings
# Are these necessary? We'll try to edit these out.
self.parent = parent
self.data_reader = parent.data_reader
def __setattr__(self, key, value):
self.__dict__[key] = value
def __repr__(self):
return self.__str__()
def __str__(self):
# Right now, this returns w.scheme, NOT necessarily what we're pulling from...
# So you can rely on this, but it's confusing.
        if self.name != None:
# Set it to None, then return the original value.
rtn_string = self.name
self.name = None
return rtn_string
else:
return str(self.scheme)
def __getitem__(self, value):
if not isinstance(value, str):
for ischeme, schemename in enumerate(self.__dict__['raw'].keys()):
if ischeme == value:
value = schemename
# Check for some weird Ipython stuff.
if '_ipython' in value:
return self
self.name = None
if value in list(self.__dict__['raw'].keys()):
# If we have it in there...
self.name = value
return self
elif value in list(self.__dict__.keys()):
self.name = value
return self
elif value in self.__dir__():
self.name = value
return self
def __getattr__(self, value):
return self.__getitem__(value)
def __dir__(self):
dict_keys = ['assign', 'direct', 'state_labels', 'bin_labels', 'west', 'reweight', 'current', 'past', 'iteration']
if self.name != None:
return sorted(set(dict_keys))
else:
return sorted(set(self.__analysis_schemes__.keys()))
@property
def scheme(self):
self.name = None
return self.parent._schemename
@property
def list_schemes(self):
'''
Lists what schemes are configured in west.cfg file.
Schemes should be structured as follows, in west.cfg:
west:
system:
analysis:
directory: analysis
analysis_schemes:
scheme.1:
enabled: True
states:
- label: unbound
coords: [[7.0]]
- label: bound
coords: [[2.7]]
bins:
- type: RectilinearBinMapper
boundaries: [[0.0, 2.80, 7, 10000]]
'''
print("The following schemes are available:")
print("")
for ischeme, scheme in enumerate(self.__settings['analysis_schemes']):
print('{}. Scheme: {}'.format(ischeme, scheme))
print("")
print("Set via name, or via the index listed.")
print("")
print("Current scheme: {}".format(self.scheme))
@property
def iteration(self):
return self.parent.iteration
@property
def assign(self):
return self.__analysis_schemes__[str(self.name)]['assign']
@property
def direct(self):
'''
The output from w_direct.py from the current scheme.
'''
return self.__analysis_schemes__[str(self.name)]['direct']
@property
def state_labels(self):
print("State labels and definitions!")
for istate, state in enumerate(self.assign['state_labels']):
print('{}: {}'.format(istate, state))
print('{}: {}'.format(istate+1, 'Unknown'))
@property
def bin_labels(self):
print("Bin definitions! ")
for istate, state in enumerate(self.assign['bin_labels']):
print('{}: {}'.format(istate, state))
@property
def west(self):
return self.data_reader.data_manager.we_h5file
@property
def reweight(self):
# Need to fix this...
if self.__settings['analysis_schemes'][str(self.name)]['postanalysis'] == True:
return self.__analysis_schemes__[str(self.name)]['reweight']
else:
value = "This sort of analysis has not been enabled."
current = { 'bin_prob_evolution': value, 'color_prob_evolution': value, 'conditional_flux_evolution': value, 'rate_evolution': value, 'state_labels': value, 'state_prob_evolution': value }
current.update({ 'bin_populations': value, 'iterations': value })
return current
@property
def current(self):
'''
The current iteration. See help for __get_data_for_iteration__
'''
return __get_data_for_iteration__(value=self.iteration, parent=self)
@property
def past(self):
'''
The previous iteration. See help for __get_data_for_iteration__
'''
if self.iteration > 1:
return __get_data_for_iteration__(value=self.iteration - 1, seg_ids=self.current['parents'], parent=self)
else:
print("The current iteration is 1; there is no past.")
| 43.073282 | 222 | 0.589267 | 27,811 | 0.985751 | 0 | 0 | 5,341 | 0.18931 | 0 | 0 | 10,373 | 0.367667 |
6f0fd9711f448e832198d3798ba9ecf322599507 | 680 | py | Python | src/M5_random_module.py | posguy99/comp660-fall2020 | 0fbf5b660fe8863bf9754b5227fe47dd03dc2291 | [
"MIT"
]
| null | null | null | src/M5_random_module.py | posguy99/comp660-fall2020 | 0fbf5b660fe8863bf9754b5227fe47dd03dc2291 | [
"MIT"
]
| null | null | null | src/M5_random_module.py | posguy99/comp660-fall2020 | 0fbf5b660fe8863bf9754b5227fe47dd03dc2291 | [
"MIT"
]
| null | null | null | import random
# use of the random module
print(random.random()) # a float value >= 0.0 and < 1.0
print(random.random()*100) # a float value >= 0.0 and < 100.0
# use of the randint method
print(random.randint(1, 100)) # an int from 1 to 100
print(random.randint(101, 200)) # an int from 101 to 200
print(random.randint(0, 7)) # an int from 0 7
die1 = random.randint(1, 6)
die2 = random.randint(1, 6)
print("Your roll: ", die1, die2)
print(random.randrange(1, 100)) # an int from 1 to 99
print(random.randrange(100, 200, 2)) # an even int from 100 to 198
print(random.randrange(11, 250, 2)) # an odd int from 11 to 249
| 35.789474 | 73 | 0.627941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.4 |
6f0fe7aa9178367d1e8da95885ff8667f686cebb | 1,385 | py | Python | lnt/graphics/styles.py | flotwig/lnt | 2f4ab3d051508801b521f5da39f0cf522c54a96e | [
"MIT"
]
| 7 | 2020-02-21T23:43:10.000Z | 2021-07-06T11:16:37.000Z | lnt/graphics/styles.py | arshbot/lntools | 9c6f344452323ff93b7a6a3763697d2ad81b4961 | [
"MIT"
]
| 19 | 2019-08-07T18:00:13.000Z | 2020-12-03T17:21:01.000Z | lnt/graphics/styles.py | arshbot/lntools | 9c6f344452323ff93b7a6a3763697d2ad81b4961 | [
"MIT"
]
| 1 | 2019-11-05T21:38:29.000Z | 2019-11-05T21:38:29.000Z | from PyInquirer import style_from_dict, Token, prompt, Separator
from lnt.graphics.utils import vars_to_string
# Mark styles
prompt_style = style_from_dict({
Token.Separator: '#6C6C6C',
Token.QuestionMark: '#FF9D00 bold',
#Token.Selected: '', # default
Token.Selected: '#5F819D',
Token.Pointer: '#FF9D00 bold',
Token.Instruction: '', # default
Token.Answer: '#5F819D bold',
Token.Question: '',
})
# Mark prompt configurations
def get_channel_choice_from(channels):
choices = [ {'name' : vars_to_string(c_id, c['local_balance'], c['remote_balance'], nick=None) } for c_id, c in channels.items() ]
validate = lambda answer: 'You must choose at least one channel' if len(answer) == 0 else True
return {
"type" : "checkbox",
"qmark": "⚡️",
"message" : "CHOOSE FROM nick, channel id, local_balance, remote_balace, graphic",
"name" : "channel_choices_from",
"choices" : choices,
"validate" : validate,
}
def get_channel_choice_to(channels):
choices = [ {'name' : vars_to_string(c_id, c['local_balance'],
c['remote_balance'], nick=None) } for c_id, c in channels.items() ]
return {
'type': 'list',
'message': 'CHOOSE TO nick, channel id, local_balance, remote_balace, graphic',
"name" : "channel_choices_to",
'choices': choices
}
| 30.108696 | 134 | 0.639711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.385889 |
6f10007c40e440e0d8097efa2d2333808b818d8f | 25,327 | py | Python | dvrip.py | jackkum/python-dvr | c004606ff8a37a213715fbc835cef77add0b3014 | [
"MIT"
]
| 149 | 2018-04-04T18:46:43.000Z | 2022-03-07T18:27:52.000Z | dvrip.py | jackkum/python-dvr | c004606ff8a37a213715fbc835cef77add0b3014 | [
"MIT"
]
| 20 | 2018-09-05T13:10:29.000Z | 2022-03-28T12:56:36.000Z | dvrip.py | jackkum/python-dvr | c004606ff8a37a213715fbc835cef77add0b3014 | [
"MIT"
]
| 51 | 2018-05-29T02:10:04.000Z | 2022-02-23T14:24:11.000Z | import os
import struct
import json
from time import sleep
import hashlib
import threading
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM
from datetime import *
from re import compile
import time
import logging
class SomethingIsWrongWithCamera(Exception):
pass
class DVRIPCam(object):
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
CODES = {
100: "OK",
101: "Unknown error",
102: "Unsupported version",
103: "Request not permitted",
104: "User already logged in",
105: "User is not logged in",
106: "Username or password is incorrect",
107: "User does not have necessary permissions",
203: "Password is incorrect",
511: "Start of upgrade",
512: "Upgrade was not started",
513: "Upgrade data errors",
514: "Upgrade error",
515: "Upgrade successful",
}
QCODES = {
"AuthorityList": 1470,
"Users": 1472,
"Groups": 1474,
"AddGroup": 1476,
"ModifyGroup": 1478,
"DelGroup": 1480,
"AddUser": 1482,
"ModifyUser": 1484,
"DelUser": 1486,
"ModifyPassword": 1488,
"AlarmInfo": 1504,
"AlarmSet": 1500,
"ChannelTitle": 1046,
"EncodeCapability": 1360,
"General": 1042,
"KeepAlive": 1006,
"OPMachine": 1450,
"OPMailTest": 1636,
"OPMonitor": 1413,
"OPNetKeyboard": 1550,
"OPPTZControl": 1400,
"OPSNAP": 1560,
"OPSendFile": 0x5F2,
"OPSystemUpgrade": 0x5F5,
"OPTalk": 1434,
"OPTimeQuery": 1452,
"OPTimeSetting": 1450,
"NetWork.NetCommon": 1042,
"OPNetAlarm": 1506,
"SystemFunction": 1360,
"SystemInfo": 1020,
}
KEY_CODES = {
"M": "Menu",
"I": "Info",
"E": "Esc",
"F": "Func",
"S": "Shift",
"L": "Left",
"U": "Up",
"R": "Right",
"D": "Down",
}
OK_CODES = [100, 515]
PORTS = {
"tcp": 34567,
"udp": 34568,
}
def __init__(self, ip, **kwargs):
self.logger = logging.getLogger(__name__)
self.ip = ip
self.user = kwargs.get("user", "admin")
        self.hash_pass = kwargs.get("hash_pass", self.sofia_hash(kwargs.get("password", "")))
self.proto = kwargs.get("proto", "tcp")
self.port = kwargs.get("port", self.PORTS.get(self.proto))
self.socket = None
self.packet_count = 0
self.session = 0
self.alive_time = 20
self.alive = None
self.alarm = None
self.alarm_func = None
self.busy = threading.Condition()
def debug(self, format=None):
self.logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
if format:
formatter = logging.Formatter(format)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def connect(self, timeout=10):
try:
if self.proto == "tcp":
self.socket_send = self.tcp_socket_send
self.socket_recv = self.tcp_socket_recv
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.connect((self.ip, self.port))
elif self.proto == "udp":
self.socket_send = self.udp_socket_send
self.socket_recv = self.udp_socket_recv
self.socket = socket(AF_INET, SOCK_DGRAM)
else:
                raise ValueError(f"Unsupported protocol {self.proto}")
# it's important to extend timeout for upgrade procedure
self.timeout = timeout
self.socket.settimeout(timeout)
except OSError:
raise SomethingIsWrongWithCamera('Cannot connect to camera')
def close(self):
try:
self.alive.cancel()
self.socket.close()
except:
pass
self.socket = None
def udp_socket_send(self, bytes):
return self.socket.sendto(bytes, (self.ip, self.port))
def udp_socket_recv(self, bytes):
data, _ = self.socket.recvfrom(bytes)
return data
def tcp_socket_send(self, bytes):
try:
return self.socket.sendall(bytes)
except:
return None
def tcp_socket_recv(self, bufsize):
try:
return self.socket.recv(bufsize)
except:
return None
def receive_with_timeout(self, length):
received = 0
buf = bytearray()
start_time = time.time()
while True:
data = self.socket_recv(length - received)
buf.extend(data)
received += len(data)
if length == received:
break
elapsed_time = time.time() - start_time
if elapsed_time > self.timeout:
return None
return buf
def receive_json(self, length):
data = self.receive_with_timeout(length)
if data is None:
return {}
self.packet_count += 1
self.logger.debug("<= %s", data)
reply = json.loads(data[:-2])
return reply
def send(self, msg, data={}, wait_response=True):
if self.socket is None:
return {"Ret": 101}
# self.busy.wait()
self.busy.acquire()
if hasattr(data, "__iter__"):
data = bytes(json.dumps(data, ensure_ascii=False), "utf-8")
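        # Packet layout, as encoded by the struct format below: 1-byte head flag
        # (255), 1-byte version (0), 2 reserved bytes, u32 session id, u32 sequence
        # number, 2 reserved bytes, u16 message id, u32 payload length (including
        # the two trailing terminator bytes), then the JSON payload and b"\x0a\x00".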
pkt = (
struct.pack(
"BB2xII2xHI",
255,
0,
self.session,
self.packet_count,
msg,
len(data) + 2,
)
+ data
+ b"\x0a\x00"
)
self.logger.debug("=> %s", pkt)
self.socket_send(pkt)
        reply = {"Ret": 101}
        if wait_response:
data = self.socket_recv(20)
            if data is None or len(data) < 20:
                # Release the lock before bailing out so a later send() cannot deadlock.
                self.busy.release()
                return None
(
head,
version,
self.session,
sequence_number,
msgid,
len_data,
) = struct.unpack("BB2xII2xHI", data)
reply = self.receive_json(len_data)
self.busy.release()
return reply
def sofia_hash(self, password=""):
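        # MD5 the password, then fold each of the 8 consecutive byte pairs into a
        # 62-character alphabet; the resulting 8-character digest is what gets sent
        # as the PassWord field at login.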
md5 = hashlib.md5(bytes(password, "utf-8")).digest()
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
return "".join([chars[sum(x) % 62] for x in zip(md5[::2], md5[1::2])])
def login(self):
if self.socket is None:
self.connect()
data = self.send(
1000,
{
"EncryptType": "MD5",
"LoginType": "DVRIP-Web",
"PassWord": self.hash_pass,
"UserName": self.user,
},
)
if data is None or data["Ret"] not in self.OK_CODES:
return False
self.session = int(data["SessionID"], 16)
self.alive_time = data["AliveInterval"]
self.keep_alive()
return data["Ret"] in self.OK_CODES
def getAuthorityList(self):
data = self.send(self.QCODES["AuthorityList"])
if data["Ret"] in self.OK_CODES:
return data["AuthorityList"]
else:
return []
def getGroups(self):
data = self.send(self.QCODES["Groups"])
if data["Ret"] in self.OK_CODES:
return data["Groups"]
else:
return []
def addGroup(self, name, comment="", auth=None):
data = self.set_command(
"AddGroup",
{
"Group": {
"AuthorityList": auth or self.getAuthorityList(),
"Memo": comment,
"Name": name,
},
},
)
return data["Ret"] in self.OK_CODES
def modifyGroup(self, name, newname=None, comment=None, auth=None):
g = [x for x in self.getGroups() if x["Name"] == name]
if g == []:
print(f'Group "{name}" not found!')
return False
g = g[0]
data = self.send(
self.QCODES["ModifyGroup"],
{
"Group": {
"AuthorityList": auth or g["AuthorityList"],
"Memo": comment or g["Memo"],
"Name": newname or g["Name"],
},
"GroupName": name,
},
)
return data["Ret"] in self.OK_CODES
def delGroup(self, name):
data = self.send(
self.QCODES["DelGroup"],
{"Name": name, "SessionID": "0x%08X" % self.session,},
)
return data["Ret"] in self.OK_CODES
def getUsers(self):
data = self.send(self.QCODES["Users"])
if data["Ret"] in self.OK_CODES:
return data["Users"]
else:
return []
def addUser(
self, name, password, comment="", group="user", auth=None, sharable=True
):
g = [x for x in self.getGroups() if x["Name"] == group]
if g == []:
print(f'Group "{group}" not found!')
return False
g = g[0]
data = self.set_command(
"AddUser",
{
"User": {
"AuthorityList": auth or g["AuthorityList"],
"Group": g["Name"],
"Memo": comment,
"Name": name,
"Password": self.sofia_hash(password),
"Reserved": False,
"Sharable": sharable,
},
},
)
return data["Ret"] in self.OK_CODES
def modifyUser(
self, name, newname=None, comment=None, group=None, auth=None, sharable=None
):
u = [x for x in self.getUsers() if x["Name"] == name]
if u == []:
print(f'User "{name}" not found!')
return False
u = u[0]
if group:
g = [x for x in self.getGroups() if x["Name"] == group]
if g == []:
print(f'Group "{group}" not found!')
return False
u["AuthorityList"] = g[0]["AuthorityList"]
data = self.send(
self.QCODES["ModifyUser"],
{
"User": {
"AuthorityList": auth or u["AuthorityList"],
"Group": group or u["Group"],
"Memo": comment or u["Memo"],
"Name": newname or u["Name"],
"Password": "",
"Reserved": u["Reserved"],
"Sharable": sharable or u["Sharable"],
},
"UserName": name,
},
)
return data["Ret"] in self.OK_CODES
def delUser(self, name):
data = self.send(
self.QCODES["DelUser"],
{"Name": name, "SessionID": "0x%08X" % self.session,},
)
return data["Ret"] in self.OK_CODES
def changePasswd(self, newpass="", oldpass=None, user=None):
data = self.send(
self.QCODES["ModifyPassword"],
{
"EncryptType": "MD5",
"NewPassWord": self.sofia_hash(newpass),
"PassWord": oldpass or self.password,
"SessionID": "0x%08X" % self.session,
"UserName": user or self.user,
},
)
return data["Ret"] in self.OK_CODES
def channel_title(self, titles):
if isinstance(titles, str):
titles = [titles]
self.send(
self.QCODES["ChannelTitle"],
{
"ChannelTitle": titles,
"Name": "ChannelTitle",
"SessionID": "0x%08X" % self.session,
},
)
def channel_bitmap(self, width, height, bitmap):
header = struct.pack("HH12x", width, height)
self.socket_send(
struct.pack(
"BB2xII2xHI",
255,
0,
self.session,
self.packet_count,
0x041A,
len(bitmap) + 16,
)
+ header
+ bitmap
)
reply, rcvd = self.recv_json()
if reply and reply["Ret"] != 100:
return False
return True
def reboot(self):
self.set_command("OPMachine", {"Action": "Reboot"})
self.close()
def setAlarm(self, func):
self.alarm_func = func
def clearAlarm(self):
self.alarm_func = None
def alarmStart(self):
self.alarm = threading.Thread(
name="DVRAlarm%08X" % self.session,
target=self.alarm_thread,
args=[self.busy],
)
self.alarm.start()
return self.get_command("", self.QCODES["AlarmSet"])
def alarm_thread(self, event):
while True:
event.acquire()
try:
(
head,
version,
session,
sequence_number,
msgid,
len_data,
) = struct.unpack("BB2xII2xHI", self.socket_recv(20))
                sleep(0.1)  # Wait briefly so the whole packet has arrived
reply = self.socket_recv(len_data)
self.packet_count += 1
reply = json.loads(reply[:-2])
if msgid == self.QCODES["AlarmInfo"] and self.session == session:
if self.alarm_func is not None:
self.alarm_func(reply[reply["Name"]], sequence_number)
except:
pass
finally:
event.release()
if self.socket is None:
break
def set_remote_alarm(self, state):
self.set_command(
"OPNetAlarm", {"Event": 0, "State": state},
)
def keep_alive(self):
ret = self.send(
self.QCODES["KeepAlive"],
{"Name": "KeepAlive", "SessionID": "0x%08X" % self.session},
)
if ret is None:
self.close()
return
self.alive = threading.Timer(self.alive_time, self.keep_alive)
self.alive.daemon = True
self.alive.start()
def keyDown(self, key):
self.set_command(
"OPNetKeyboard", {"Status": "KeyDown", "Value": key},
)
def keyUp(self, key):
self.set_command(
"OPNetKeyboard", {"Status": "KeyUp", "Value": key},
)
def keyPress(self, key):
self.keyDown(key)
sleep(0.3)
self.keyUp(key)
def keyScript(self, keys):
for k in keys:
if k != " " and k.upper() in self.KEY_CODES:
self.keyPress(self.KEY_CODES[k.upper()])
else:
sleep(1)
def ptz(self, cmd, step=5, preset=-1, ch=0):
CMDS = [
"DirectionUp",
"DirectionDown",
"DirectionLeft",
"DirectionRight",
"DirectionLeftUp",
"DirectionLeftDown",
"DirectionRightUp",
"DirectionRightDown",
"ZoomTile",
"ZoomWide",
"FocusNear",
"FocusFar",
"IrisSmall",
"IrisLarge",
"SetPreset",
"GotoPreset",
"ClearPreset",
"StartTour",
"StopTour",
]
# ptz_param = { "AUX" : { "Number" : 0, "Status" : "On" }, "Channel" : ch, "MenuOpts" : "Enter", "POINT" : { "bottom" : 0, "left" : 0, "right" : 0, "top" : 0 }, "Pattern" : "SetBegin", "Preset" : -1, "Step" : 5, "Tour" : 0 }
ptz_param = {
"AUX": {"Number": 0, "Status": "On"},
"Channel": ch,
"MenuOpts": "Enter",
"Pattern": "Start",
"Preset": preset,
"Step": step,
"Tour": 1 if "Tour" in cmd else 0,
}
return self.set_command(
"OPPTZControl", {"Command": cmd, "Parameter": ptz_param},
)
def set_info(self, command, data):
return self.set_command(command, data, 1040)
def set_command(self, command, data, code=None):
if not code:
code = self.QCODES[command]
return self.send(
code, {"Name": command, "SessionID": "0x%08X" % self.session, command: data}
)
def get_info(self, command):
return self.get_command(command, 1042)
def get_command(self, command, code=None):
if not code:
code = self.QCODES[command]
data = self.send(code, {"Name": command, "SessionID": "0x%08X" % self.session})
if data["Ret"] in self.OK_CODES and command in data:
return data[command]
else:
return data
def get_time(self):
return datetime.strptime(self.get_command("OPTimeQuery"), self.DATE_FORMAT)
def set_time(self, time=None):
if time is None:
time = datetime.now()
return self.set_command("OPTimeSetting", time.strftime(self.DATE_FORMAT))
def get_netcommon(self):
return self.get_command("NetWork.NetCommon")
def get_system_info(self):
return self.get_command("SystemInfo")
def get_general_info(self):
return self.get_command("General")
def get_encode_capabilities(self):
return self.get_command("EncodeCapability")
def get_system_capabilities(self):
return self.get_command("SystemFunction")
def get_camera_info(self, default_config=False):
"""Request data for 'Camera' from the target DVRIP device."""
if default_config:
code = 1044
else:
code = 1042
return self.get_command("Camera", code)
def get_encode_info(self, default_config=False):
"""Request data for 'Simplify.Encode' from the target DVRIP device.
Arguments:
default_config -- returns the default values for the type if True
"""
if default_config:
code = 1044
else:
code = 1042
return self.get_command("Simplify.Encode", code)
def recv_json(self, buf=bytearray()):
p = compile(b".*({.*})")
packet = self.socket_recv(0xFFFF)
if not packet:
return None, buf
buf.extend(packet)
m = p.search(buf)
if m is None:
return None, buf
buf = buf[m.span(1)[1] :]
return json.loads(m.group(1)), buf
def get_upgrade_info(self):
return self.get_command("OPSystemUpgrade")
def upgrade(self, filename="", packetsize=0x8000, vprint=None):
if not vprint:
vprint = lambda x: print(x)
data = self.set_command(
"OPSystemUpgrade", {"Action": "Start", "Type": "System"}, 0x5F0
)
if data["Ret"] not in self.OK_CODES:
return data
vprint("Ready to upgrade")
blocknum = 0
sentbytes = 0
fsize = os.stat(filename).st_size
rcvd = bytearray()
with open(filename, "rb") as f:
while True:
bytes = f.read(packetsize)
if not bytes:
break
header = struct.pack(
"BB2xII2xHI", 255, 0, self.session, blocknum, 0x5F2, len(bytes)
)
self.socket_send(header + bytes)
blocknum += 1
sentbytes += len(bytes)
reply, rcvd = self.recv_json(rcvd)
if reply and reply["Ret"] != 100:
vprint("Upgrade failed")
return reply
progress = sentbytes / fsize * 100
vprint(f"Uploaded {progress:.2f}%")
vprint("End of file")
pkt = struct.pack("BB2xIIxBHI", 255, 0, self.session, blocknum, 1, 0x05F2, 0)
self.socket_send(pkt)
vprint("Waiting for upgrade...")
while True:
reply, rcvd = self.recv_json(rcvd)
print(reply)
if not reply:
return
if reply["Name"] == "" and reply["Ret"] == 100:
break
while True:
data, rcvd = self.recv_json(rcvd)
            print(data)
if data is None:
vprint("Done")
return
if data["Ret"] in [512, 514, 513]:
vprint("Upgrade failed")
return data
if data["Ret"] == 515:
vprint("Upgrade successful")
self.socket.close()
return data
vprint(f"Upgraded {data['Ret']}%")
def reassemble_bin_payload(self, metadata={}):
def internal_to_type(data_type, value):
if data_type == 0x1FC or data_type == 0x1FD:
if value == 1:
return "mpeg4"
elif value == 2:
return "h264"
elif value == 3:
return "h265"
elif data_type == 0x1F9:
if value == 1 or value == 6:
return "info"
elif data_type == 0x1FA:
if value == 0xE:
return "g711a"
elif data_type == 0x1FE and value == 0:
return "jpeg"
return None
def internal_to_datetime(value):
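            # The camera packs the timestamp into one 32-bit word:
            # bits 0-5 seconds, 6-11 minutes, 12-16 hour, 17-21 day,
            # 22-25 month, 26-31 year offset from 2000.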
second = value & 0x3F
minute = (value & 0xFC0) >> 6
hour = (value & 0x1F000) >> 12
day = (value & 0x3E0000) >> 17
month = (value & 0x3C00000) >> 22
year = ((value & 0xFC000000) >> 26) + 2000
return datetime(year, month, day, hour, minute, second)
length = 0
buf = bytearray()
start_time = time.time()
while True:
data = self.receive_with_timeout(20)
(
head,
version,
session,
sequence_number,
total,
cur,
msgid,
len_data,
) = struct.unpack("BB2xIIBBHI", data)
packet = self.receive_with_timeout(len_data)
frame_len = 0
if length == 0:
media = None
frame_len = 8
(data_type,) = struct.unpack(">I", packet[:4])
if data_type == 0x1FC or data_type == 0x1FE:
frame_len = 16
(media, metadata["fps"], w, h, dt, length,) = struct.unpack(
"BBBBII", packet[4:frame_len]
)
metadata["width"] = w * 8
metadata["height"] = h * 8
metadata["datetime"] = internal_to_datetime(dt)
if data_type == 0x1FC:
metadata["frame"] = "I"
elif data_type == 0x1FD:
(length,) = struct.unpack("I", packet[4:frame_len])
metadata["frame"] = "P"
elif data_type == 0x1FA:
(media, samp_rate, length) = struct.unpack(
"BBH", packet[4:frame_len]
)
elif data_type == 0x1F9:
(media, n, length) = struct.unpack("BBH", packet[4:frame_len])
# special case of JPEG shapshots
elif data_type == 0xFFD8FFE0:
return packet
else:
raise ValueError(data_type)
if media is not None:
metadata["type"] = internal_to_type(data_type, media)
buf.extend(packet[frame_len:])
length -= len(packet) - frame_len
if length == 0:
return buf
elapsed_time = time.time() - start_time
if elapsed_time > self.timeout:
return None
def snapshot(self, channel=0):
command = "OPSNAP"
self.send(
self.QCODES[command],
{
"Name": command,
"SessionID": "0x%08X" % self.session,
command: {"Channel": channel},
},
wait_response=False,
)
packet = self.reassemble_bin_payload()
return packet
def start_monitor(self, frame_callback, user={}, stream="Main"):
params = {
"Channel": 0,
"CombinMode": "NONE",
"StreamType": stream,
"TransMode": "TCP",
}
data = self.set_command("OPMonitor", {"Action": "Claim", "Parameter": params})
if data["Ret"] not in self.OK_CODES:
return data
self.send(
1410,
{
"Name": "OPMonitor",
"SessionID": "0x%08X" % self.session,
"OPMonitor": {"Action": "Start", "Parameter": params},
},
wait_response=False,
)
self.monitoring = True
while self.monitoring:
meta = {}
frame = self.reassemble_bin_payload(meta)
frame_callback(frame, meta, user)
def stop_monitor(self):
self.monitoring = False
| 31.5798 | 232 | 0.48774 | 25,099 | 0.990998 | 0 | 0 | 0 | 0 | 0 | 0 | 4,143 | 0.16358 |
6f1051aadde1f5582ce2b30a763b8cd2ec505a2e | 1,373 | py | Python | tests/test_renderer.py | 0xflotus/maildown | fa17ce6a29458da549a145741db8e5092def2176 | [
"MIT"
]
| 626 | 2019-05-08T22:34:45.000Z | 2022-03-31T07:29:35.000Z | tests/test_renderer.py | pythonthings/maildown | 4e0caf297bdf264ab5ead537eb45d20f187971a1 | [
"MIT"
]
| 12 | 2019-04-30T20:47:17.000Z | 2019-06-27T11:19:46.000Z | tests/test_renderer.py | pythonthings/maildown | 4e0caf297bdf264ab5ead537eb45d20f187971a1 | [
"MIT"
]
| 36 | 2019-05-08T23:50:41.000Z | 2021-07-30T17:46:24.000Z | import mock
from maildown import renderer
import mistune
import pygments
from pygments import lexers
from pygments.formatters import html
import premailer
import jinja2
def test_highlight_renderer(monkeypatch):
monkeypatch.setattr(mistune, "escape", mock.MagicMock())
monkeypatch.setattr(lexers, "get_lexer_by_name", mock.MagicMock())
monkeypatch.setattr(html, "HtmlFormatter", mock.MagicMock())
monkeypatch.setattr(pygments, "highlight", mock.MagicMock())
lexers.get_lexer_by_name.return_value = True
html.HtmlFormatter.return_value = {}
r = renderer.HighlightRenderer()
r.block_code("code")
mistune.escape.assert_called_with("code")
r.block_code("code", "python")
lexers.get_lexer_by_name.assert_called_with("python", stripall=True)
pygments.highlight.assert_called_with("code", True, {})
def test_generate_content(monkeypatch):
monkeypatch.setattr(mistune, "Markdown", mock.MagicMock())
monkeypatch.setattr(premailer, "transform", mock.MagicMock())
monkeypatch.setattr(renderer, "HighlightRenderer", mock.MagicMock())
monkeypatch.setattr(jinja2, "Template", mock.MagicMock())
renderer.HighlightRenderer.return_value = 1
premailer.transform.return_value = ""
jinja2.Template.render.return_value = ""
renderer.generate_content("")
mistune.Markdown.assert_called_with(renderer=1)
| 33.487805 | 72 | 0.758194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.108521 |
6f105f0927ad589737ae9605008d8f670158e4d5 | 1,423 | py | Python | practice/practice_4/main.py | Norbert2808/programming | 3dbab86718c1cee5efe3b4b92e4492f984c75ea2 | [
"Unlicense"
]
| null | null | null | practice/practice_4/main.py | Norbert2808/programming | 3dbab86718c1cee5efe3b4b92e4492f984c75ea2 | [
"Unlicense"
]
| null | null | null | practice/practice_4/main.py | Norbert2808/programming | 3dbab86718c1cee5efe3b4b92e4492f984c75ea2 | [
"Unlicense"
]
| null | null | null | from generator import *
from iterator import *
def nInput():
while True:
try:
n = int(input("Enter n(size): "))
if n <= 0:
print("Input must be a positive integer!")
continue
except ValueError:
print("Not the correct value n!")
continue
break
return n
def intInput(message):
while True:
try:
k = int(input(message))
except ValueError:
print("Not the correct value!")
continue
break
return k
def printGenerator(gen):
for i in gen:
print(i)
def printIterator(iter):
for i in range(0, n):
print(iter.__next__())
if __name__ == "__main__":
while True:
print("Enter 1, if you want to generate prime Lucas Number.")
print("Enter 2, if you want to iterate prime Lucas Number.")
print("Or 0, if you want to get out: ")
count = intInput("")
if count == 1:
n = nInput()
print("First " + str(n) + " prime Lucas Number:")
gen = generator(n)
printGenerator(gen)
elif count == 2:
n = nInput()
print("First " + str(n) + " prime Lucas Number:")
iter = IteratorLucasNumbers()
printIterator(iter)
elif count == 0:
break
else: print("Enter 1, or 2, or 0!") | 26.351852 | 69 | 0.51019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.235418 |
6f11a287519a38fcf82e8d66f617304a1a4f570b | 688 | py | Python | setup.py | wgnet/grail | 1d8d22bebda758800cb9aa9027486053d568bc14 | [
"Apache-2.0"
]
| 37 | 2015-01-12T07:34:34.000Z | 2020-12-29T09:46:28.000Z | setup.py | wgnet/grail | 1d8d22bebda758800cb9aa9027486053d568bc14 | [
"Apache-2.0"
]
| 7 | 2015-04-10T14:55:34.000Z | 2021-04-28T10:00:47.000Z | setup.py | wgnet/grail | 1d8d22bebda758800cb9aa9027486053d568bc14 | [
"Apache-2.0"
]
| 17 | 2015-01-06T20:09:02.000Z | 2019-06-28T08:57:36.000Z | from setuptools import setup
version = '1.0.10'
setup(
name='grail',
version=version,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
packages=[
'grail',
],
description='Grail is a library which allows test script creation based on steps. '
'It helps to structure your tests and get rid of additional test documentation for your code.',
include_package_data=True,
author='Wargaming.NET',
author_email='[email protected]',
url='https://github.com/wgnet/grail'
)
| 28.666667 | 111 | 0.640988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.582849 |
6f123e344c537798141dc193f3e6368ab0209301 | 964 | py | Python | tests/test_free.py | qingyunha/boltdb | 2ea341336f02210f751cd49ea7724d890511db38 | [
"MIT"
]
| 7 | 2020-11-18T10:06:47.000Z | 2021-09-06T16:31:13.000Z | tests/test_free.py | qingyunha/boltdb | 2ea341336f02210f751cd49ea7724d890511db38 | [
"MIT"
]
| 1 | 2021-02-20T19:32:11.000Z | 2021-02-20T19:32:11.000Z | tests/test_free.py | qingyunha/boltdb | 2ea341336f02210f751cd49ea7724d890511db38 | [
"MIT"
]
| 2 | 2020-11-25T15:21:20.000Z | 2021-02-20T19:28:14.000Z | import os
import unittest
import tempfile
from boltdb import BoltDB
class TestFree(unittest.TestCase):
def setUp(self):
self.db = BoltDB(tempfile.mktemp())
def tearDown(self):
os.unlink(self.db.filename)
def test_free(self):
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(self.db.freelist.ids, [3])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(self.db.freelist.ids, [4])
def test_free2(self):
self.assertEqual(self.db.freepages(), [2])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(sorted(self.db.freepages()), [2, 3])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(sorted(self.db.freepages()), [2, 4])
| 24.717949 | 61 | 0.551867 | 892 | 0.925311 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.049793 |
6f149a0dd9e45b60d9d630858342198ce7d83ebf | 1,709 | py | Python | xen/xen-4.2.2/tools/xm-test/tests/xapi/01_xapi-vm_basic.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
]
| 1 | 2018-02-02T00:15:26.000Z | 2018-02-02T00:15:26.000Z | xen/xen-4.2.2/tools/xm-test/tests/xapi/01_xapi-vm_basic.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
]
| null | null | null | xen/xen-4.2.2/tools/xm-test/tests/xapi/01_xapi-vm_basic.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
]
| 1 | 2019-05-27T09:47:18.000Z | 2019-05-27T09:47:18.000Z | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Author: Stefan Berger <[email protected]>
# Basic VM creation test
from XmTestLib import xapi
from XmTestLib.XenAPIDomain import XmTestAPIDomain
from XmTestLib import *
from xen.xend import XendAPIConstants
import commands
import os
try:
# XmTestAPIDomain tries to establish a connection to XenD
domain = XmTestAPIDomain()
except Exception, e:
SKIP("Skipping test. Error: %s" % str(e))
vm_uuid = domain.get_uuid()
session = xapi.connect()
domain.start(startpaused=True)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_PAUSED]:
FAIL("VM was not started in 'paused' state")
res = session.xenapi.VM.unpause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_RUNNING]:
FAIL("VM could not be put into 'running' state")
console = domain.getConsole()
try:
run = console.runCmd("cat /proc/interrupts")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL("Could not access proc-filesystem")
res = session.xenapi.VM.pause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_PAUSED]:
FAIL("VM could not be put into 'paused' state")
res = session.xenapi.VM.unpause(vm_uuid)
res = session.xenapi.VM.get_power_state(vm_uuid)
if res != XendAPIConstants.XEN_API_VM_POWER_STATE[XendAPIConstants.XEN_API_VM_POWER_STATE_RUNNING]:
FAIL("VM could not be 'unpaused'")
domain.stop()
domain.destroy()
| 27.564516 | 99 | 0.774137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.252779 |
6f15c8f389db3a70f0c15433c6aa9fb9ab9b3570 | 369 | py | Python | formatter_sql.py | ZSCNetSupportDept/schedule-utils | e7ba0f34c7af77980758c9a0e9a82faca66f558c | [
"WTFPL"
]
| null | null | null | formatter_sql.py | ZSCNetSupportDept/schedule-utils | e7ba0f34c7af77980758c9a0e9a82faca66f558c | [
"WTFPL"
]
| null | null | null | formatter_sql.py | ZSCNetSupportDept/schedule-utils | e7ba0f34c7af77980758c9a0e9a82faca66f558c | [
"WTFPL"
]
| null | null | null | def format_sql(schedule, table):
for week, schedule in schedule.items():
print(f"UPDATE {table} SET `block`=0, `week`={week} WHERE `name`='{schedule.leader}';")
for block, staffs in schedule.staffs.items():
for staff in staffs:
print(f"UPDATE {table} SET `block`={block}, `week`={week} WHERE `name`='{staff}';")
pass
| 46.125 | 99 | 0.596206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.422764 |
6f162b9d147aaaf9aa9b58f1a839359e4e0bcd22 | 9,024 | py | Python | nlp_fourier.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
]
| 1 | 2020-08-03T16:24:06.000Z | 2020-08-03T16:24:06.000Z | nlp_fourier.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
]
| null | null | null | nlp_fourier.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
]
| null | null | null | """Fourier transform non-linear Poisson solver"""
# This module is concerned with solving the "non-linear Poisson"
# equation
# Delta(u) = f(u,z)
# on a uniform rectangular mesh, with u = u0 on the boundary.
#
# We solve the equation by an iterative method, solving an
# approximation to the linearized equation at u_i to get u_{i+1} and
# terminating when u_{i+1} - u_i is small enough.
#
# The key feature of this solve is that we use a very coarse
# approximation of the linearization---chosen specifically so that it
# can be solved by Fourier transform methods. The coarse
# approxmination means that each iteration makes little progress
# toward the final solution, and many iterations are necessary.
# However, the availability of efficient FFT routines means that each
# iteration is very fast, and so in many cases there is a net gain
# compared to a direct method.
#
# The exact linearized equation for v = u-u0 is
# Delta(vdot) - d1F(v,z) vdot = F(v,z) - Delta(vdot) (*)
# where
# F(v,z) = f(u0+v,z) - Delta(u0)
# We rewrite (*) as
# (Delta - A)vdot = RHS
# This is exactly solvable by Fourier methods if A is a constant
# function.
#
# To approximate a solution, we replace A = d1F(v,z) by a constant
# that is in some way representative of its values on he grid points.
# We follow the suggestion of [1] to use the "minimax" value
#
# A = (max(d1F) + min(d1F)) / 2
#
# where max and min are taken over the grid.
#
# References
#
# [1] Concus, P. and Golub, G. H. 1973. Use of fast direct methods for
# the efficient numerical solution of nonseparable elliptic
# equations. SIAM J. Numer. Anal., 10: 1103-1103.
#
# KNOWN ISSUES:
#
# * The initialization code assumes that u_0 is harmonic in a
# neighborhood of the boundary of the mesh. This is not a
# fundamental requirement of the method, but because u_0 cannot be
# easily extended to a doubly-periodic function its Laplacian is
# computed by a finite difference scheme rather than by FFT methods.
# Being harmonic at the boundary allows us to simply zero out the
# Laplacian at the edges and ignore this issue.
#
# (Note that this assumption is satisfied for the applications to
#   the self-duality equations for which this solver was developed.)
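#
# Example (hypothetical usage sketch, not part of the solver itself; assumes
# `grid` is a SquareGrid-like object exposing zm/zv/nx/ny/dx/dy and that f and
# d1f act elementwise on numpy arrays):
#
#     import numpy as np
#     solver = NLPFourier(f=lambda u, z: np.sinh(u),
#                         d1f=lambda u, z: np.cosh(u),
#                         u0=lambda z: np.zeros(z.shape),
#                         grid=grid,
#                         thresh=1e-7)
#     u = solver.u    # solution on the mesh, equal to u0(grid.zm) on the boundary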
from __future__ import absolute_import
import numpy as np
import scipy.signal
from dst2 import dst2, idst2, dst2freq
from solverexception import SolverException
import time
import logging
logger = logging.getLogger(__name__)
def _max_power_2_dividing(n):
n = int(n)
return n & (~(n-1))
def _suggest_sizes(n):
if n == _max_power_2_dividing(n):
return [n-1]
a = np.log(n+1)/np.log(2.0)
return [2**int(np.floor(a))-1, 2**int(np.ceil(a))-1]
def _is_bad_size_for_dst(n):
return float(n+1) / _max_power_2_dividing(n+1) > 5.0
class NLPFourier(object):
"""Solve the system Delta(u) = f(u,z) on a uniform mesh, with u = u0
on boundary, using Fourier transform methods.
"""
def __init__(self,f,d1f,u0,grid,thresh=0.0000001,maxiter=5000,relax=1.0,linear=False):
"""Initialize and run the solver.
Parameters:
f : f(u,z)
d1f : [df/du](u,z)
          u0 : initial guess and boundary condition
grid : SquareGrid or similar object representing a rectangular
mesh (use zm, nx, ny, dx, dy attributes)
thresh : L^2 error goal for the solver
maxiter : raise exception if threshold not met after this many iterations
relax : Step by (relax)*(linearized solution) at each
iteration; setting to less than 1.0 may enlarge
domain of convergence at the cost of convergence
speed.
linear : is the equation to be solved actually linear?
(not used)
Return:
None
Output class attributes:
u : Solution u
u0 : Initial guess
"""
self._t0 = time.time()
self.f = f
self.d1f = d1f
self.grid = grid
self.u0func = u0
self.u0 = u0(self.grid.zm)
self.thresh = thresh
self.maxiter = maxiter
self.relax = relax
self.normcoef = self.grid.dx * self.grid.dy / (2.0*np.sqrt((self.grid.nx + 1)*(self.grid.ny+1)))
self.warn_if_bad_sizes()
# Capital K for unnormalized frequencies ("per index")
# Lower k for real frequencies ("per unit x or y")
KX, KY = dst2freq(self.u0)
self.kx = 2*np.pi*KX/self.grid.dx # a row
self.ky = 2*np.pi*KY/self.grid.dy # a column
self.ksq = self.kx**2 + self.ky**2 # broadcasted to a 2D array now
# Laplacian of initial guess and its transform
# (These are relatively long-running computations.)
# TODO 1: Remove implicit assumption that Delta(u0) vanishes on boundary.
# TODO 2: Move this finite difference stuff into its own module.
logger.info("Computing Laplacian of initial guess and its transform")
idx,idy = 1.0/self.grid.dx, 1.0/self.grid.dy
lap_stencil = np.array([[0,idy**2,0],[idx**2,-2*(idx**2 + idy**2),idx**2],[0,idy**2,0]],dtype=np.float64)
Lap_u0_raw = scipy.signal.convolve2d(self.u0,lap_stencil,mode='same')
self.Lap_u0 = np.zeros_like(Lap_u0_raw)
self.Lap_u0[2:-2,2:-2] = Lap_u0_raw[2:-2,2:-2]
self._t1 = time.time()
self._t = self._t1
logger.info("Solving PDE: %dx%d grid, thresh=%g" % (self.grid.nx,self.grid.ny,self.thresh))
self.u = self._iterate()
def _iterate(self):
"""Fourier solver main loop"""
vhat = np.zeros_like(self.u0)
n = 0
last_delta_norm = 0.0
while True:
n = n + 1
# Compute the DST of the RHS of the inhomogeneous linearized equation
v = idst2(vhat)
vvec = v.reshape((self.grid.nx * self.grid.ny, ))
Fvvec = self.F(vvec,self.grid.zv)
Fv = Fvvec.reshape((self.grid.nx, self.grid.ny))
Fv_hat = dst2(Fv)
Lapv_hat = -self.ksq * vhat
RHS_hat = Fv_hat - Lapv_hat
# Compute the L^2 norm of the inhomogeneous term
# ( = 0 iff we have a solution )
residual = self.L2norm_hat(RHS_hat)
now = time.time()
logger.info("PDE: iter=%d L2error=%g L2delta=%g\t(%.2fs)" % (n,residual,last_delta_norm,now-self._t))
self._t = now
if residual < self.thresh:
logger.info('PDE: success\t(%.2fs total; %.2fs in main loop)',now - self._t0,now - self._t1)
break
if np.any(np.isnan(RHS_hat)):
# Computing RHS revealed some failure in the computation
# (usually means the linearized solution at the previous step was bad)
raise SolverException("NAN encountered in RHS computation (overflow or underflow?)")
if n >= self.maxiter:
raise SolverException("Max iterations (%d) reached without meeting error threshold %g" % (self.maxiter,self.thresh))
# Solve a constant-coefficient approximation of the linear
# equation in frequency space.
# First compute the constant that approximates d1F.
a = self.minimax(self.d1F(vvec,self.grid.zv))
# Now compute the transform of the exact solution to this
# constant coef problem.
delta_vhat = RHS_hat / (-self.ksq - a)
last_delta_norm = self.L2norm_hat(delta_vhat)
# Update vhat by adding this approx solution of the
# linearization
vhat = vhat + self.relax * delta_vhat
self.iter = n
self.delta = last_delta_norm
self.error = residual
return self.u0 + v
def warn_if_bad_sizes(self):
msgstr = '%s-size %d is a bad choice for the fourier solver; this computation will be inefficient. Good sizes have the form (2**n)-1. Consider using size %s instead.'
if _is_bad_size_for_dst(self.grid.nx):
logger.warning(msgstr % ('x',self.grid.nx, ' or '.join(str(n) for n in _suggest_sizes(self.grid.nx))))
if _is_bad_size_for_dst(self.grid.ny):
logger.warning(msgstr % ('y',self.grid.ny, ' or '.join(str(n) for n in _suggest_sizes(self.grid.nx))))
def F(self,v,z):
u0vec = self.u0.reshape((self.grid.nx * self.grid.ny, ))
Lapu0vec = self.Lap_u0.reshape((self.grid.nx * self.grid.ny, ))
return self.f(u0vec + v,z) - Lapu0vec
def d1F(self,v,z):
u0vec = self.u0.reshape((self.grid.nx * self.grid.ny, ))
return self.d1f(u0vec + v,z)
def minimax(self,m):
"""Return the average of max(m) and min(m)"""
return 0.5*(np.max(m) + np.min(m))
def L2norm_hat(self,m):
"""L^2 norm of a function computed from its Fourier transform coefficients"""
return np.linalg.norm(m) * self.normcoef
| 38.729614 | 176 | 0.623559 | 6,212 | 0.688387 | 0 | 0 | 0 | 0 | 0 | 0 | 4,796 | 0.531472 |
6f177aacdeb67b4df7640983b24e1411fe279553 | 2,853 | py | Python | app/models/fragment.py | saury2013/Memento | dbb2031a5aff3064f40bcb5afe631de8724a547e | [
"MIT"
]
| null | null | null | app/models/fragment.py | saury2013/Memento | dbb2031a5aff3064f40bcb5afe631de8724a547e | [
"MIT"
]
| null | null | null | app/models/fragment.py | saury2013/Memento | dbb2031a5aff3064f40bcb5afe631de8724a547e | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.orm import load_only
from sqlalchemy import func
from flask import abort
from markdown import Markdown,markdown
from app.models import db,fragment_tags_table
from app.models.tag import Tag
from app.whoosh import search_helper
class Fragment(db.Model):
    '''Knowledge fragment'''
__tablename__ = 'fragment'
__table_args__ = {
"mysql_engine": "InnoDB",
"mysql_charset": "utf8"
}
id = db.Column(db.Integer,nullable=False,primary_key=True,autoincrement=True)
title = db.Column(db.String(255),nullable=False,default="",index=True)
access = db.Column(db.Integer,nullable=False,default=1)
status = db.Column(db.Integer,nullable=False,default=0)
markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
html = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_markdown = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_html = db.deferred(db.Column(LONGTEXT,default="",nullable=False))
publish_timestamp = db.Column(db.DateTime,default=datetime.now,nullable=False)
updatetime = db.Column(db.DateTime,default=datetime.now,nullable=False)
user_id = db.Column(db.Integer,db.ForeignKey('user.id'))
tags = db.relationship('Tag',secondary=fragment_tags_table,backref=db.backref('fragments'))
# branch = db.relationship('Branch',back_populates='fragment',uselist=False)
branch_id = db.Column(db.Integer,db.ForeignKey('branch.id'))
# branch = db.relationship('Branch',foreign_keys=branch_id)
def get(self,id):
return Fragment.query.get(id)
@staticmethod
def get_or_404(id):
fragment = Fragment.query.get(id)
if fragment:
return fragment
abort(404)
def save(self):
self.html = self.markdown2html(self.markdown)
db.session.add(self)
db.session.commit()
search_helper.add_document(self.title,str(self.id),self.markdown)
def markdown2html(self,content):
# md = Markdown(['codehilite', 'fenced_code', 'meta', 'tables'])
# html = md.convert(content)
html = markdown(content,extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
return html
@staticmethod
def get_nearest_fragments(num=5):
fragments = Fragment.query.filter().order_by(Fragment.updatetime.desc()).limit(num)
res = []
from app.models.branch import Branch
for fragment in fragments:
fragment.branch = Branch.get(fragment.branch_id)
res.append(fragment)
return res
| 38.04 | 95 | 0.660007 | 2,511 | 0.877665 | 0 | 0 | 500 | 0.174764 | 0 | 0 | 451 | 0.157637 |
6f18b3824b6daec3cd5fa315168eff3f33823b3f | 24,236 | py | Python | qatrack/qa/migrations/0001_initial.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
]
| 20 | 2021-03-11T18:37:32.000Z | 2022-03-23T19:38:07.000Z | qatrack/qa/migrations/0001_initial.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
]
| 75 | 2021-02-12T02:37:33.000Z | 2022-03-29T20:56:16.000Z | qatrack/qa/migrations/0001_initial.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
]
| 5 | 2021-04-07T15:46:53.000Z | 2021-09-18T16:55:00.000Z | # -*- coding: utf-8 -*-
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('units', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AutoReviewRule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pass_fail', models.CharField(unique=True, max_length=15, choices=[(b'not_done', b'Not Done'), (b'ok', b'OK'), (b'tolerance', b'Tolerance'), (b'action', b'Action'), (b'no_tol', b'No Tol Set')])),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores', unique=True, max_length=255)),
('description', models.TextField(help_text='Give a brief description of what type of tests should be included in this grouping')),
],
options={
'ordering': ('name',),
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Frequency',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='Display name for this frequency', unique=True, max_length=50)),
('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores for this frequency', unique=True)),
('nominal_interval', models.PositiveIntegerField(help_text='Nominal number of days between test completions')),
('due_interval', models.PositiveIntegerField(help_text='How many days since last completed until a test with this frequency is shown as due')),
('overdue_interval', models.PositiveIntegerField(help_text='How many days since last completed until a test with this frequency is shown as over due')),
],
options={
'ordering': ('nominal_interval',),
'verbose_name_plural': 'frequencies',
'permissions': (('can_choose_frequency', 'Choose QA by Frequency'),),
},
),
migrations.CreateModel(
name='Reference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='Enter a short name for this reference', max_length=255)),
('type', models.CharField(default=b'numerical', max_length=15, choices=[(b'numerical', b'Numerical'), (b'boolean', b'Yes / No')])),
('value', models.FloatField(help_text='Enter the reference value for this test.')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(related_name='reference_creators', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='reference_modifiers', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
],
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='Name for this test', unique=True, max_length=255, db_index=True)),
('slug', models.SlugField(help_text='A short variable name consisting of alphanumeric characters and underscores for this test (to be used in composite calculations). ', max_length=128, verbose_name=b'Macro name')),
('description', models.TextField(help_text='A concise description of what this test is for (optional. You may use HTML markup)', null=True, blank=True)),
('procedure', models.CharField(help_text='Link to document describing how to perform this test', max_length=512, null=True, blank=True)),
('chart_visibility', models.BooleanField(default=True, verbose_name=b'Test item visible in charts?')),
('auto_review', models.BooleanField(default=False, verbose_name='Allow auto review of this test?')),
('type', models.CharField(default=b'simple', help_text='Indicate if this test is a Boolean,Simple Numerical,Multiple Choice,Constant,Composite,String,String Composite,File Upload', max_length=10, choices=[(b'boolean', b'Boolean'), (b'simple', b'Simple Numerical'), (b'multchoice', b'Multiple Choice'), (b'constant', b'Constant'), (b'composite', b'Composite'), (b'string', b'String'), (b'scomposite', b'String Composite'), (b'upload', b'File Upload')])),
('hidden', models.BooleanField(default=False, help_text="Don't display this test when performing QA", verbose_name='Hidden')),
('skip_without_comment', models.BooleanField(default=False, help_text='Allow users to skip this test without a comment', verbose_name='Skip without comment')),
('display_image', models.BooleanField(default=False, help_text='Image uploads only: Show uploaded images under the testlist', verbose_name=b'Display image')),
('choices', models.CharField(help_text='Comma seperated list of choices for multiple choice test types', max_length=2048, null=True, blank=True)),
('constant_value', models.FloatField(help_text='Only required for constant value types', null=True, blank=True)),
('calculation_procedure', models.TextField(help_text='For Composite Tests Only: Enter a Python snippet for evaluation of this test.', null=True, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(help_text='Choose a category for this test', to='qa.Category', on_delete=models.PROTECT)),
('created_by', models.ForeignKey(related_name='test_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='test_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
],
),
migrations.CreateModel(
name='TestInstance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('review_date', models.DateTimeField(null=True, editable=False, blank=True)),
('pass_fail', models.CharField(db_index=True, max_length=20, editable=False, choices=[(b'not_done', b'Not Done'), (b'ok', b'OK'), (b'tolerance', b'Tolerance'), (b'action', b'Action'), (b'no_tol', b'No Tol Set')])),
('value', models.FloatField(help_text='For boolean Tests a value of 0 equals False and any non zero equals True', null=True)),
('string_value', models.CharField(max_length=1024, null=True, blank=True)),
('skipped', models.BooleanField(default=False, help_text='Was this test skipped for some reason (add comment)')),
('comment', models.TextField(help_text='Add a comment to this test', null=True, blank=True)),
('work_started', models.DateTimeField(editable=False, db_index=True)),
('work_completed', models.DateTimeField(default=django.utils.timezone.now, help_text=b'Format DD-MM-YY hh:mm (hh:mm is 24h time e.g. 31-05-12 14:30)', db_index=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('modified', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(related_name='test_instance_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='test_instance_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('reference', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to='qa.Reference', null=True)),
('reviewed_by', models.ForeignKey(blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)),
],
options={
'get_latest_by': 'work_completed',
'permissions': (('can_view_history', 'Can see test history when performing QA'), ('can_view_charts', 'Can view charts of test history'), ('can_review', 'Can review & approve tests'), ('can_skip_without_comment', 'Can skip tests without comment'), ('can_review_own_tests', 'Can review & approve self-performed tests')),
},
),
migrations.CreateModel(
name='TestInstanceStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='Display name for this status type', unique=True, max_length=50)),
('slug', models.SlugField(help_text='Unique identifier made of lowercase characters and underscores for this status', unique=True)),
('description', models.TextField(help_text='Give a brief description of what type of test results should be given this status', null=True, blank=True)),
('is_default', models.BooleanField(default=False, help_text='Check to make this status the default for new Test Instances')),
('requires_review', models.BooleanField(default=True, help_text='Check to indicate that Test Instances with this status require further review')),
('export_by_default', models.BooleanField(default=True, help_text='Check to indicate whether tests with this status should be exported by default (e.g. for graphing/control charts)')),
('valid', models.BooleanField(default=True, help_text='If unchecked, data with this status will not be exported and the TestInstance will not be considered a valid completed Test')),
],
options={
'verbose_name_plural': 'statuses',
},
),
migrations.CreateModel(
name='TestList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, db_index=True)),
('slug', models.SlugField(help_text='A short unique name for use in the URL of this list', unique=True)),
('description', models.TextField(help_text='A concise description of this test checklist. (You may use HTML markup)', null=True, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('warning_message', models.CharField(default=b'Do not treat', help_text='Message given when a test value is out of tolerance', max_length=255)),
('created_by', models.ForeignKey(related_name='qa_testlist_created', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='qa_testlist_modified', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('sublists', models.ManyToManyField(help_text='Choose any sublists that should be performed as part of this list.', to='qa.TestList', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TestListCycle',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, db_index=True)),
('slug', models.SlugField(help_text='A short unique name for use in the URL of this list', unique=True)),
('description', models.TextField(help_text='A concise description of this test checklist. (You may use HTML markup)', null=True, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('drop_down_label', models.CharField(default=b'Choose Day', max_length=128)),
('day_option_text', models.CharField(default=b'day', max_length=8, choices=[(b'day', b'Day'), (b'tlname', b'Test List Name')])),
('created_by', models.ForeignKey(related_name='qa_testlistcycle_created', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='qa_testlistcycle_modified', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TestListCycleMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField()),
('cycle', models.ForeignKey(to='qa.TestListCycle', on_delete=models.CASCADE)),
('test_list', models.ForeignKey(to='qa.TestList', on_delete=models.CASCADE)),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='TestListInstance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('work_started', models.DateTimeField(db_index=True)),
('work_completed', models.DateTimeField(default=django.utils.timezone.now, null=True, db_index=True)),
('comment', models.TextField(help_text='Add a comment to this set of tests', null=True, blank=True)),
('in_progress', models.BooleanField(default=False, help_text='Mark this session as still in progress so you can complete later (will not be submitted for review)', db_index=True)),
('reviewed', models.DateTimeField(null=True, blank=True)),
('all_reviewed', models.BooleanField(default=False)),
('day', models.IntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField()),
('created_by', models.ForeignKey(related_name='test_list_instance_creator', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='test_list_instance_modifier', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('reviewed_by', models.ForeignKey(related_name='test_list_instance_reviewer', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)),
('test_list', models.ForeignKey(editable=False, to='qa.TestList', on_delete=models.PROTECT)),
],
options={
'get_latest_by': 'work_completed',
'permissions': (('can_override_date', 'Can override date'), ('can_perform_subset', 'Can perform subset of tests'), ('can_view_completed', 'Can view previously completed instances')),
},
),
migrations.CreateModel(
name='TestListMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField(db_index=True)),
('test', models.ForeignKey(to='qa.Test', on_delete=models.CASCADE)),
('test_list', models.ForeignKey(to='qa.TestList', on_delete=models.CASCADE)),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='Tolerance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(help_text='Select whether this will be an absolute or relative tolerance criteria', max_length=20, choices=[(b'absolute', b'Absolute'), (b'percent', b'Percentage'), (b'multchoice', b'Multiple Choice')])),
('act_low', models.FloatField(help_text='Value of lower Action level', null=True, verbose_name='Action Low', blank=True)),
('tol_low', models.FloatField(help_text='Value of lower Tolerance level', null=True, verbose_name='Tolerance Low', blank=True)),
('tol_high', models.FloatField(help_text='Value of upper Tolerance level', null=True, verbose_name='Tolerance High', blank=True)),
('act_high', models.FloatField(help_text='Value of upper Action level', null=True, verbose_name='Action High', blank=True)),
                ('mc_pass_choices', models.CharField(help_text='Comma separated list of choices that are considered passing', max_length=2048, null=True, verbose_name='Multiple Choice OK Values', blank=True)),
                ('mc_tol_choices', models.CharField(help_text='Comma separated list of choices that are considered at tolerance', max_length=2048, null=True, verbose_name='Multiple Choice Tolerance Values', blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(related_name='tolerance_creators', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
('modified_by', models.ForeignKey(related_name='tolerance_modifiers', editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)),
],
options={
'ordering': ['type', 'act_low', 'tol_low', 'tol_high', 'act_high'],
},
),
migrations.CreateModel(
name='UnitTestCollection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('due_date', models.DateTimeField(help_text='Next time this item is due', null=True, blank=True)),
('auto_schedule', models.BooleanField(default=True, help_text='If this is checked, due_date will be auto set based on the assigned frequency')),
('active', models.BooleanField(default=True, help_text='Uncheck to disable this test on this unit', db_index=True)),
('object_id', models.PositiveIntegerField()),
('assigned_to', models.ForeignKey(to='auth.Group', help_text='QA group that this test list should nominally be performed by', null=True, on_delete=models.PROTECT)),
('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.PROTECT)),
('frequency', models.ForeignKey(blank=True, to='qa.Frequency', help_text='Frequency with which this test list is to be performed', null=True, on_delete=models.SET_NULL)),
('last_instance', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, editable=False, to='qa.TestListInstance', null=True)),
('unit', models.ForeignKey(to='units.Unit', on_delete=models.PROTECT)),
('visible_to', models.ManyToManyField(help_text='Select groups who will be able to see this test collection on this unit', related_name='test_collection_visibility', to='auth.Group')),
],
options={
'verbose_name_plural': 'Assign Test Lists to Units',
'permissions': (('can_view_overview', 'Can view program overview'), ('can_review_non_visible_tli', "Can view tli and utc not visible to user's groups")),
},
),
migrations.CreateModel(
name='UnitTestInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('active', models.BooleanField(default=True, help_text='Uncheck to disable this test on this unit', db_index=True)),
('assigned_to', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='auth.Group', help_text='QA group that this test list should nominally be performed by', null=True)),
('reference', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Current Reference', blank=True, to='qa.Reference', null=True)),
('test', models.ForeignKey(to='qa.Test', on_delete=models.PROTECT)),
('tolerance', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='qa.Tolerance', null=True)),
('unit', models.ForeignKey(to='units.Unit', on_delete=models.PROTECT)),
],
options={
'verbose_name_plural': 'Set References & Tolerances',
'permissions': (('can_view_ref_tol', 'Can view Refs and Tols'),),
},
),
migrations.AddField(
model_name='testlistinstance',
name='unit_test_collection',
field=models.ForeignKey(editable=False, to='qa.UnitTestCollection', on_delete=models.PROTECT),
),
migrations.AddField(
model_name='testlistcycle',
name='test_lists',
field=models.ManyToManyField(to='qa.TestList', through='qa.TestListCycleMembership'),
),
migrations.AddField(
model_name='testlist',
name='tests',
field=models.ManyToManyField(help_text='Which tests does this list contain', to='qa.Test', through='qa.TestListMembership'),
),
migrations.AddField(
model_name='testinstance',
name='status',
field=models.ForeignKey(to='qa.TestInstanceStatus', on_delete=models.PROTECT),
),
migrations.AddField(
model_name='testinstance',
name='test_list_instance',
field=models.ForeignKey(editable=False, to='qa.TestListInstance', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='testinstance',
name='tolerance',
field=models.ForeignKey(on_delete=models.PROTECT, blank=True, editable=False, to='qa.Tolerance', null=True),
),
migrations.AddField(
model_name='testinstance',
name='unit_test_info',
field=models.ForeignKey(editable=False, to='qa.UnitTestInfo', on_delete=models.PROTECT),
),
migrations.AddField(
model_name='autoreviewrule',
name='status',
field=models.ForeignKey(to='qa.TestInstanceStatus', on_delete=models.CASCADE),
),
migrations.AlterUniqueTogether(
name='unittestinfo',
unique_together=set([('test', 'unit')]),
),
migrations.AlterUniqueTogether(
name='unittestcollection',
unique_together=set([('unit', 'frequency', 'content_type', 'object_id')]),
),
migrations.AlterUniqueTogether(
name='testlistmembership',
unique_together=set([('test_list', 'test')]),
),
]
| 74.572308 | 469 | 0.637523 | 24,071 | 0.993192 | 0 | 0 | 0 | 0 | 0 | 0 | 8,397 | 0.346468 |
6f1b13d2e45c97356d3de371d486f2f4c6321a9d | 746 | py | Python | cloudygram_api_server/models/telethon_model.py | Maverick1983/cloudygram-api-server | acb0b0ed173ebfff8b1a2b69efef3abe943e735e | [
"Unlicense"
]
| 2 | 2021-05-25T15:24:03.000Z | 2021-05-27T09:35:56.000Z | cloudygram_api_server/models/telethon_model.py | skurob/cgas | 7660064882c5d5e56dbc4aa7e5be99754ffdcfd6 | [
"Unlicense"
]
| 1 | 2021-05-27T08:32:55.000Z | 2021-05-27T10:02:35.000Z | cloudygram_api_server/models/telethon_model.py | skurob/cgas | 7660064882c5d5e56dbc4aa7e5be99754ffdcfd6 | [
"Unlicense"
]
| 1 | 2021-06-03T10:06:49.000Z | 2021-06-03T10:06:49.000Z | from .constants import SUCCESS_KEY, MESSAGE_KEY, DATA_KEY
from cloudygram_api_server.scripts import CGMessage
from typing import List
class TtModels:
@staticmethod
def sing_in_failure(message) -> dict:
return {
SUCCESS_KEY : False,
MESSAGE_KEY : message
}
@staticmethod
def send_code_failure(message) -> dict:
return {
SUCCESS_KEY : False,
MESSAGE_KEY : message
}
@staticmethod
def message_list(messages) -> dict:
mapped_messages: List[str] = []
for m in messages:
mapped_messages.append(CGMessage.map_from_tt(m))
return {
SUCCESS_KEY: True,
DATA_KEY: mapped_messages
}
| 25.724138 | 60 | 0.608579 | 609 | 0.816354 | 0 | 0 | 577 | 0.773458 | 0 | 0 | 0 | 0 |
6f1b8a527ec012630d1bead41b940dac1320a132 | 4,617 | py | Python | source1/bsp/entities/portal2_entity_handlers.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
]
| 199 | 2019-04-02T02:30:58.000Z | 2022-03-30T21:29:49.000Z | source1/bsp/entities/portal2_entity_handlers.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
]
| 113 | 2019-03-03T19:36:25.000Z | 2022-03-31T19:44:05.000Z | source1/bsp/entities/portal2_entity_handlers.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
]
| 38 | 2019-05-15T16:49:30.000Z | 2022-03-22T03:40:43.000Z | import math
from mathutils import Euler
import bpy
from .portal2_entity_classes import *
from .portal_entity_handlers import PortalEntityHandler
local_entity_lookup_table = PortalEntityHandler.entity_lookup_table.copy()
local_entity_lookup_table.update(entity_class_handle)
class Portal2EntityHandler(PortalEntityHandler):
entity_lookup_table = local_entity_lookup_table
pointlight_power_multiplier = 1000
def handle_prop_weighted_cube(self, entity: prop_weighted_cube, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_weighted_cube', obj, 'props')
def handle_prop_testchamber_door(self, entity: prop_testchamber_door, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_testchamber_door', obj, 'props')
def handle_prop_floor_button(self, entity: prop_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_button', obj, 'props')
def handle_prop_floor_ball_button(self, entity: prop_floor_ball_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_ball_button', obj, 'props')
def handle_prop_floor_cube_button(self, entity: prop_floor_cube_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_cube_button', obj, 'props')
def handle_prop_under_floor_button(self, entity: prop_under_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_under_floor_button', obj, 'props')
def handle_prop_tractor_beam(self, entity: prop_tractor_beam, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_tractor_beam', obj, 'props')
def handle_logic_playmovie(self, entity: logic_playmovie, entity_raw: dict):
obj = bpy.data.objects.new(self._get_entity_name(entity), None)
self._set_location(obj, entity.origin)
self._set_icon_if_present(obj, entity)
self._set_entity_data(obj, {'entity': entity_raw})
self._put_into_collection('logic_playmovie', obj, 'logic')
def handle_trigger_paint_cleanser(self, entity: trigger_paint_cleanser, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_paint_cleanser', mesh_object, 'triggers')
def handle_trigger_catapult(self, entity: trigger_catapult, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_catapult', mesh_object, 'triggers')
def handle_npc_wheatley_boss(self, entity: npc_wheatley_boss, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('npc_wheatley_boss', obj, 'npc')
def handle_prop_exploding_futbol(self, entity: prop_exploding_futbol, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol', obj, 'props')
def handle_prop_exploding_futbol_socket(self, entity: prop_exploding_futbol_socket, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol', obj, 'props')
    def handle_prop_exploding_futbol_spawner(self, entity: prop_exploding_futbol_spawner, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol_spawner', obj, 'props')
| 53.068966 | 109 | 0.753736 | 4,338 | 0.939571 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.113277 |
6f1ed343bbac27b5996271e2bb652c962f6512bc | 3,935 | py | Python | michelanglo_api/ss_parser.py | matteoferla/MichelaNGLo-api | c00749d4b9385785f777bd6613ea8327381a3f38 | [
"MIT"
]
| 1 | 2020-05-23T07:42:24.000Z | 2020-05-23T07:42:24.000Z | michelanglo_api/ss_parser.py | matteoferla/MichelaNGLo-api | c00749d4b9385785f777bd6613ea8327381a3f38 | [
"MIT"
]
| null | null | null | michelanglo_api/ss_parser.py | matteoferla/MichelaNGLo-api | c00749d4b9385785f777bd6613ea8327381a3f38 | [
"MIT"
]
| null | null | null | from collections import namedtuple
class SSParser:
"""
Create a SS block from PDB data.
Written to be agnostic of PDB parser, but for now only has PyMOL.
.. code-block:: python
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load('model.pdb', 'prot')
ss = SSParser().parse_pymol(pymol.cmd)
print(ss)
# or
SSParser.correct_file('model.pdb', True)
Do note that the lines seem offset because SHEET has a name parameter.
HELIX 1 HA GLY A 86 GLY A 94 1 9
SHEET 5 A 5 GLY A 52 PHE A 56 -1 N PHE A 56 O TRP A 71
SHEET 1 B 5 THR B 107 ARG B 110 0
"""
# faux pymol atom
Atom = namedtuple('Atom', ['ss', 'resi', 'resn', 'chain'])
def __init__(self):
# none of the attributes are actually public.
self.ss = []
self.start = self.Atom('L', 0, 'XXX', 'X')
self.previous = self.Atom('L', 0, 'XXX', 'X')
self.ss_count = {'H': 1, 'S': 1, 'L': 0}
def parse_pymol(self, cmd, selector: str = 'name ca') -> str:
atoms = list(cmd.get_model(selector).atom)
return self.parse(atoms)
def parse(self, atoms: list) -> str:
"""
        atoms is a list of objects with 'ss', 'resi', and 'resn' attributes,
        one per residue (CA).
        This does not collapse the list into a list of ranges, as resn is also required.
:param atoms:
:return:
"""
for current in atoms:
if self.previous.ss != current.ss or self.previous.chain != current.chain: # different
self._store_ss() # the previous ss has come to an end.
# deal with current
if current.ss in ('S', 'H'): # start of a new
self.start = current
# move on
self.previous = current
self._store_ss()
return str(self)
def _store_ss(self):
"""
The SS sequence has come to an end: store it.
:return:
"""
if self.previous.ss == '':
return # not AA?
if int(self.previous.resi) == int(self.start.resi) + 1:
return # too short
cc = self.ss_count[self.previous.ss]
        if self.previous.ss == 'H':  # the stretch that just ended was a helix
self.ss.append(
f'HELIX {cc: >3} {cc: >3} ' +
f'{self.start.resn} {self.start.chain} {self.start.resi: >4} ' +
f'{self.previous.resn} {self.previous.chain} {self.previous.resi: >4} 1' +
' ' +
f'{int(self.previous.resi) - int(self.start.resi): >2}'
)
self.ss_count[self.previous.ss] += 1
        elif self.previous.ss == 'S':  # the stretch that just ended was a strand
self.ss.append(
f'SHEET {cc: >3} {cc: >2}S 1 ' +
f'{self.start.resn} {self.start.chain}{self.start.resi: >4} ' +
f'{self.previous.resn} {self.previous.chain}{self.previous.resi: >4} 0')
self.ss_count[self.previous.ss] += 1
else:
# loop? Nothing.
pass
def __str__(self):
return '\n'.join(self.ss) +'\n'
@classmethod
def correct_file(cls, filename: str, write:bool=True):
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load(filename, 'prot')
ss = cls().parse_pymol(pymol.cmd)
with open(filename, 'r') as fh:
block = fh.read()
if write:
with open(filename, 'w') as fh:
fh.write(ss + block)
return ss + block
@classmethod
def correct_block(cls, block: str):
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.read_pdbstr(block, 'prot')
ss = cls().parse_pymol(pymol.cmd)
return ss + block
| 36.775701 | 99 | 0.516645 | 3,897 | 0.990343 | 0 | 0 | 651 | 0.165438 | 0 | 0 | 1,764 | 0.448285 |
6f1f6d17456ac645513cd747a8b58ba607f3346f | 748 | py | Python | Net640/apps/user_posts/mixin.py | 86Ilya/net640kb | 6724f3da3b678b637e0e776ee0d4953753ee2e05 | [
"MIT"
]
| 1 | 2019-06-18T09:50:29.000Z | 2019-06-18T09:50:29.000Z | Net640/apps/user_posts/mixin.py | 86Ilya/net640kb | 6724f3da3b678b637e0e776ee0d4953753ee2e05 | [
"MIT"
]
| 10 | 2019-12-24T07:05:29.000Z | 2022-02-10T07:42:44.000Z | Net640/apps/user_posts/mixin.py | 86Ilya/net640kb | 6724f3da3b678b637e0e776ee0d4953753ee2e05 | [
"MIT"
]
| null | null | null | from django.urls import reverse
from Net640.settings import FRONTEND_DATE_FORMAT
class AsDictMessageMixin:
"""
Mixin for representing user messages(post, comments) as dictionaries
"""
def as_dict(self, executor):
return {'content': self.content,
'user_has_like': self.has_like(executor),
'is_owner': self.user == executor,
'rating': round(self.get_rating(), 1),
'author': self.user.username,
'author_page': reverse('friends:user_view', kwargs={'user_id': self.user.id}),
'date': self.date.strftime(FRONTEND_DATE_FORMAT),
'id': self.id,
'author_thumbnail_url': self.user.get_thumbnail_url(), }
| 37.4 | 94 | 0.605615 | 664 | 0.887701 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.278075 |
6f1f734997fb69804fc6859e112a7faf8e27b40b | 16,030 | py | Python | squids/tfrecords/maker.py | mmgalushka/squids | 2d6e1bbeb89721a2ff232a7031997111c600abb6 | [
"MIT"
]
| null | null | null | squids/tfrecords/maker.py | mmgalushka/squids | 2d6e1bbeb89721a2ff232a7031997111c600abb6 | [
"MIT"
]
| 37 | 2022-01-15T21:42:23.000Z | 2022-02-23T23:43:31.000Z | squids/tfrecords/maker.py | mmgalushka/squids | 2d6e1bbeb89721a2ff232a7031997111c600abb6 | [
"MIT"
]
| null | null | null | """A module for converting a data source to TFRecords."""
import os
import json
import copy
import csv
from pathlib import Path
from shutil import rmtree
import PIL.Image as Image
import tensorflow as tf
from tqdm import tqdm
from .feature import items_to_features
from .errors import DirNotFoundError, InvalidDatasetFormat
from ..config import IMAGE_WIDTH, IMAGE_HEIGHT, DATASET_DIR, TFRECORDS_SIZE
# ------------------------------------------------------------------------------
# CSV/COCO Dataset Detectors
# ------------------------------------------------------------------------------
def is_csv_input(input_dir: Path) -> bool:
"""
Tests if the input directory represents CSV dataset format.
Args:
input_dir (Path):
The input directory to test.
Returns:
status (bool):
Returns `True` if the input directory represents CSV dataset
format and `False` otherwise.
"""
return set(os.listdir(input_dir)) == set(
[
"images",
"instances_train.csv",
"instances_test.csv",
"instances_val.csv",
"categories.json",
]
)
def is_coco_input(input_dir: Path) -> bool:
"""
Tests if the input directory represents COCO dataset format.
Args:
input_dir (Path):
The input directory to test.
Returns:
status (bool):
Returns `True` if the input directory represents COCO dataset
format and `False` otherwise.
"""
root_artifacts = os.listdir(input_dir)
if "annotations" in root_artifacts:
annotations_artifacts = os.listdir(input_dir / "annotations")
stems_artifacts = [
Path(artifact).stem for artifact in annotations_artifacts
]
return set(stems_artifacts).issubset(set(root_artifacts))
return False
# ------------------------------------------------------------------------------
# CSV/COCO Dataset Iterators
# ------------------------------------------------------------------------------
class CategoriesMap:
"""
A dictionary-like object for intelligently mapping categories.
The goal of this class is to remap user-specified categories for the
compact one-hot encoding. Let's review a simple example. Assume, the
original data has images that include objects belonging to the two
categories: 15 and 20. If we do not remap these categories, then we
need to create one-hot with length `max(15,20) + 1 = 21` (plus one
is to allow one additional category "no object"). This creates
unnecessary overhead during the model training. The most intuitive
solution would be to remap the original categories to the following
`{15: 1, 20: 2}`. In this case, the one-hot encoding length would be
`max(1,2) + 1 = 3` .
To initiate remapping the `selected_categories` argument should be
defined. All selected category IDs will be sorted in ascending order
with the consequent re-assignment to the new IDs. For example, let's
    assume the specified selected categories are `[12, 5, 3, 23]`; after
    sorting, this list becomes `[3, 5, 12, 23]` and the remapping is
    `{3: 1, 5: 2, 12: 3, 23: 4}`.
    If the `selected_categories` argument is defined, the operation
    `map[ORIGINAL_ID]` returns `NEW_ID` (the remapped category ID).
    If the `selected_categories` argument is not defined, the operation
    `map[ORIGINAL_ID]` returns `ORIGINAL_ID` (in other words, it remaps
    each ID to itself).
Args:
selected_categories (list):
The list of categories to map.
"""
def __init__(self, selected_categories: list):
self.__categories_mapping = {}
if len(selected_categories) > 0:
for new_category_id, old_category_id in enumerate(
sorted(selected_categories)
):
self.__categories_mapping[old_category_id] = (
new_category_id + 1
)
def __getitem__(self, category_id):
"""Returns the remapped category ID."""
if self.__categories_mapping:
return self.__categories_mapping[category_id]
else:
return category_id
def __contains__(self, category_id):
"""Tests if the specified category ID in the map."""
if self.__categories_mapping:
return category_id in self.__categories_mapping
else:
return True
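# Illustrative usage of CategoriesMap (values follow the docstring example;
# the variable name is only for demonstration):
#
#     cmap = CategoriesMap([12, 5, 3, 23])
#     cmap[12]                # -> 3 (IDs are sorted, then renumbered from 1)
#     23 in cmap              # -> True
#     7 in cmap               # -> False (not a selected category)
#     CategoriesMap([])[15]   # -> 15 (no remapping when nothing is selected)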
class DatasetIterator:
"""
The base class for dataset records iterator.
Args:
records (list):
The list with records to iterate.
image_dir (Path):
The base path for loading images.
"""
def __init__(self, records: list, image_dir: Path):
self.__records = records
self.__image_dir = image_dir
self.__size = len(self.__records)
self.__pointer = 0
def __iter__(self):
"""Returns the dataset records iterator."""
return self
def __len__(self):
"""Returns a number of records in the dataset."""
return self.__size
def __next__(self):
"""Returns the next record."""
if self.__pointer >= self.__size:
raise StopIteration
record = self.__records[self.__pointer]
record["image"]["data"] = Image.open(
self.__image_dir / record["image"]["file_name"]
)
self.__pointer += 1
return record
class CsvIterator(DatasetIterator):
"""
The CSV dataset iterator.
Args:
instance_file (Path):
The path to the `csv`-file with records to iterate.
selected_categories (list):
The list of category IDs on which iteration should take place.
If an image within a record does not contain a selected category
            it is skipped. If no selected category IDs are defined, then
            iteration goes over all images.
"""
def __init__(self, instance_file: Path, selected_categories: list):
categories_map = CategoriesMap(selected_categories)
categories = dict()
with open(instance_file.parent / "categories.json") as fp:
for category in json.load(fp)["categories"]:
category_id = category["id"]
if category_id in categories_map:
                    # Remaps the old category ID to the new one.
new_category = copy.deepcopy(category)
new_category["id"] = categories_map[category["id"]]
categories[new_category["id"]] = new_category
records = []
with open(instance_file, newline="\n") as csv_fp:
csv_reader = csv.DictReader(csv_fp, delimiter=",", quotechar='"')
for row in csv_reader:
annotations = []
for bbox, segmentation, category_id in zip(
json.loads(row["bboxes"]),
json.loads(row["segmentations"]),
json.loads(row["category_ids"]),
):
if category_id in categories_map:
annotations.append(
{
"bbox": bbox,
"iscrowd": 0,
"segmentation": [segmentation],
"category_id": categories_map[category_id],
}
)
# Here we discard all images which do not have any
# annotations for the selected categories.
if len(annotations) > 0:
records.append(
{
"image": {
"id": int(row["image_id"]),
"file_name": row["file_name"],
},
"annotations": annotations,
"categories": categories,
}
)
super().__init__(records, instance_file.parent / "images")
class CocoIterator(DatasetIterator):
"""
The COCO dataset iterator.
Args:
instance_file (Path):
The path to the `json`-file with records to iterate.
selected_categories (list):
The list of category IDs on which iteration should take place.
If an image within a record does not contain a selected category
            it is skipped. If no selected category IDs are defined, then
            iteration goes over all images.
"""
def __init__(self, instance_file: Path, selected_categories: list):
categories_map = CategoriesMap(selected_categories)
with open(instance_file) as f:
content = json.load(f)
annotations = dict()
for annotation in content["annotations"]:
category_id = annotation["category_id"]
if category_id in categories_map:
image_id = annotation["image_id"]
if image_id not in annotations:
annotations[image_id] = []
                # Remaps the old category ID to the new one.
new_annotation = copy.deepcopy(annotation)
new_annotation["category_id"] = categories_map[category_id]
annotations[image_id].append(new_annotation)
categories = dict()
for category in content["categories"]:
category_id = category["id"]
if category_id in categories_map:
                # Remaps the old category ID to the new one.
new_category = copy.deepcopy(category)
new_category["id"] = categories_map[category_id]
categories[new_category["id"]] = new_category
records = []
for image in content["images"]:
if image["id"] in annotations:
records.append(
{
"image": image,
"annotations": annotations[image["id"]],
"categories": categories,
}
)
super().__init__(
records, instance_file.parent.parent / instance_file.stem
)
# ------------------------------------------------------------------------------
# Dataset to TFRecords Transformer
# ------------------------------------------------------------------------------
def instances_to_tfrecords(
instance_file: Path,
output_dir: Path,
items: DatasetIterator,
size: int,
image_width: int,
image_height: int,
verbose: bool,
):
"""
    Convert instances to TFRecords.
Args:
instance_file (Path):
The path to the instance file to read data from.
output_dir (Path):
The path to the output directory to save generated TFRecords.
items (DatasetIterator):
The CSV or COCO dataset iterator.
size (int):
            The number of images per partition.
image_width (int):
The TFRecords image width resize to.
image_height (int):
The TFRecords image height resize to.
verbose (bool):
The flag to set verbose mode.
"""
def get_example(item):
image_id = item["image"]["id"]
img = item["image"]["data"]
annotations = item["annotations"]
categories = item["categories"]
category_max_id = max(list(categories.keys()))
bboxes = []
segmentations = []
category_ids = []
for annotation in annotations:
if annotation["iscrowd"] == 0:
bboxes.append(annotation["bbox"])
segmentations.append(annotation["segmentation"][0])
category_ids.append(annotation["category_id"])
feature = items_to_features(
image_id,
img,
image_width,
image_height,
bboxes,
segmentations,
category_ids,
category_max_id,
)
return tf.train.Example(features=tf.train.Features(feature=feature))
tfrecords_dir = output_dir / instance_file.stem
tfrecords_dir.mkdir(exist_ok=True)
# The TFRecords writer.
writer = None
# The index for the next TFRecords partition.
part_index = -1
    # The count of how many records have been stored in the current TFRecords
    # file. It is set here to the maximum capacity (as a trick) so that the
    # "if" condition in the loop is True on the first pass and partition 0
    # gets started.
part_count = size
    # Initializes the progress bar if verbose mode is on.
if verbose:
pbar = tqdm(total=len(items))
for item in items:
if item:
if part_count >= size:
                # The current partition has reached the maximum capacity,
# so we need to start a new one.
if writer is not None:
# Closes the existing TFRecords writer.
writer.close()
part_index += 1
writer = tf.io.TFRecordWriter(
str(tfrecords_dir / f"part-{part_index}.tfrecord")
)
part_count = 0
example = get_example(item)
if example:
writer.write(example.SerializeToString())
part_count += 1
            # Updates the progress bar if verbose mode is on.
if verbose:
pbar.update(1)
# Closes the existing TFRecords writer after the last row.
writer.close()
def create_tfrecords(
dataset_dir: str = DATASET_DIR,
tfrecords_dir: str = None,
size: int = TFRECORDS_SIZE,
image_width: int = IMAGE_WIDTH,
image_height: int = IMAGE_HEIGHT,
selected_categories: list = [],
verbose: bool = False,
):
"""
    This function transforms a CSV or COCO dataset to TFRecords.
Args:
dataset_dir (str):
The path to the data set directory to transform.
tfrecords_dir (str):
The path to the output directory to save generated TFRecords.
size (int):
            The number of images per partition.
image_width (int):
The TFRecords image width resize to.
image_height (int):
The TFRecords image height resize to.
selected_categories (list):
The list of selected category IDs.
verbose (bool):
The flag to set verbose mode.
Raises:
DirNotFoundError:
If input or output directories do not exist.
InvalidDatasetFormat:
If the input dataset has invalid CSV or COCO format.
"""
input_dir = Path(dataset_dir)
if not input_dir.exists():
raise DirNotFoundError("input dataset", input_dir)
if tfrecords_dir is None:
output_dir = input_dir.parent / (input_dir.name + "-tfrecords")
else:
output_dir = Path(tfrecords_dir)
if not output_dir.parent.exists():
raise DirNotFoundError("parent (to output)", output_dir.parent)
if output_dir.exists():
rmtree(output_dir)
output_dir.mkdir(exist_ok=True)
if is_csv_input(input_dir):
for instance_file in input_dir.rglob("*.csv"):
instances_to_tfrecords(
instance_file,
output_dir,
CsvIterator(instance_file, selected_categories),
size,
image_width,
image_height,
verbose,
)
elif is_coco_input(input_dir):
for instance_file in (input_dir / "annotations").rglob("*.json"):
instances_to_tfrecords(
instance_file,
output_dir,
CocoIterator(instance_file, selected_categories),
size,
image_width,
image_height,
verbose,
)
else:
raise InvalidDatasetFormat()
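# Illustrative call (directory names are hypothetical): convert a CSV- or
# COCO-formatted dataset into shards of 256 records, keeping only categories 1 and 3:
#
#     create_tfrecords("dataset", "dataset-tfrecords", size=256,
#                      selected_categories=[1, 3], verbose=True)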
| 33.818565 | 80 | 0.564255 | 8,221 | 0.512851 | 0 | 0 | 0 | 0 | 0 | 0 | 7,014 | 0.437555 |
6f1f9754bb7f6d41b30e4a4c10cead5e654ca04e | 2,743 | py | Python | edexOsgi/com.raytheon.edex.plugin.gfe/utility/cave_static/user/GFETEST/gfe/userPython/smartTools/ExUtil1.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/cave_static/user/GFETEST/gfe/userPython/smartTools/ExUtil1.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/cave_static/user/GFETEST/gfe/userPython/smartTools/ExUtil1.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# ExUtil1
#
# Author:
# ----------------------------------------------------------------------------
ToolType = "numeric"
WeatherElementEdited = "T"
from numpy import *
import SmartScript
import Common
VariableList = [("Model:" , "", "D2D_model")]
class Tool (SmartScript.SmartScript):
def __init__(self, dbss):
self._dbss = dbss
SmartScript.SmartScript.__init__(self, dbss)
def execute(self, GridTimeRange, Topo, varDict):
"This tool accesses T grids directly"
self._common = Common.Common(self._dbss)
model = varDict["Model:"]
# Convert Topo to meters
topo_M = self._common._convertFtToM(Topo)
# Make a sounding cubes for T
# Height will increase in the sounding and be the
# first dimension
levels = ["MB1000","MB850", "MB700","MB500"]
gh_Cube, t_Cube = self.makeNumericSounding(
model, "t", levels, GridTimeRange)
print "Cube shapes ", gh_Cube.shape, t_Cube.shape
# Make an initial T grid with values of -200
# This is an out-of-range value to help us identify values that
# have already been set.
T = (Topo * 0) - 200
# Work "upward" in the cubes to assign T
# We will only set the value once, i.e. the first time the
# gh height is greater than the Topo
# For each level
for i in xrange(gh_Cube.shape[0]):
# where ( gh > topo and T == -200),
# set to t_Cube value, otherwise keep value already set))
T = where(logical_and(greater(gh_Cube[i], topo_M), equal(T,-200)), t_Cube[i], T)
# Convert from K to F
T_F = self.convertKtoF(T)
return T_F
| 34.2875 | 96 | 0.596792 | 1,480 | 0.539555 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.62778 |
6f1fef78694338432a72024d0e2abb835ff193fd | 5,335 | py | Python | venv/KryptoSkattScript/mining_income.py | odgaard/KryptoSkatt | 60338f25af2300b165738ceac033aae72969f7c5 | [
"MIT"
]
| null | null | null | venv/KryptoSkattScript/mining_income.py | odgaard/KryptoSkatt | 60338f25af2300b165738ceac033aae72969f7c5 | [
"MIT"
]
| null | null | null | venv/KryptoSkattScript/mining_income.py | odgaard/KryptoSkatt | 60338f25af2300b165738ceac033aae72969f7c5 | [
"MIT"
]
| null | null | null | import pathlib
import datetime
path = 'c:/Users/Jacob/PycharmProjects/KryptoSkatt/Data/'
trans_in = list()
trans_out = list()
bitcoin_dict = dict()
ethereum_dict = dict()
USD_NOK_dict = dict()
def unix_time_to_date(timestamp):
return datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d')
def populate_electrum_wallet_transactions(file):
    # Exported data from my Bitcoin (Electrum) wallet
    # Collects all incoming and outgoing transactions for my wallet
with open(pathlib.PureWindowsPath(path + file), 'r') as f:
total = 0
for line in f.read().split('\n'):
new_line = line.split(',')
if(len(new_line) > 3):
if(new_line[3][0] == '+'): trans_in.append(new_line)
if(new_line[3][0] == '-'): trans_out.append(new_line)
def populate_bitcoin_price_index(file):
# Gather Bitcoin price index from
# https://blockchain.info/charts/market-price?timespan=2years
with open(pathlib.PureWindowsPath(path + file), 'r') as f:
for line in f.read().split('\n'):
line = line.split(' ')
final_line = [line[0], line[1].split(',')[1]]
bitcoin_dict[final_line[0]] = float(final_line[1])
def populate_ethereum_price_index(file):
# Gather Ethereum price index from
# https://www.etherchain.org/charts/priceUSD
with open(pathlib.PureWindowsPath(path + file), 'r') as f:
for line in f.read().split('\n')[1:]:
line = line.split(',')
new_line = [line[0].replace('"',''), line[1]]
# Max one entry per day
date = new_line[0].split(' ')[0]
price = float(new_line[1])
ethereum_dict[date] = float(price)
def populate_USD_NOK_conversion(file):
# Gather USD to NOK conversion from
# https://data.norges-bank.no/api/data/EXR/B.USD.NOK.SP?StartPeriod=2017&EndPeriod=2018&format=csv-:-comma-true-flat
with open(pathlib.PureWindowsPath(path + file), 'r') as f:
for line in f.read().split('\n'):
line = line.split(',')
USD_NOK_dict[line[5]] = float(line[6])
def get_bitcoin_income(year, start_value):
# Calculate total income based on data
total_NOK, total_BTC = 0, 0
USD_NOK_exchange = start_value #First day of the year
for trans in trans_in:
date = trans[4].split(' ')[0]
if(date.split('-')[0] != year): continue
if(USD_NOK_dict.get(date)):
USD_NOK_exchange = USD_NOK_dict[date]
result = bitcoin_dict[date] * float(trans[3][1:]) * USD_NOK_exchange
total_BTC += float(trans[3][1:])
total_NOK += result
return total_NOK, total_BTC
def get_ethereum_income(file, start_value):
total = 0
# Gather mining data from ethermine (my mining pool)
# https://ethermine.org/api/miner
with open(pathlib.PureWindowsPath(path + file), 'r') as f:
total_NOK, total_ETH = 0, 0
USD_NOK_exchange = start_value #First day of the year
for line in f.read().split('\n')[1:]:
new_line = [string.replace('"', '') for string in line.split(',')[1:]]
amount = int(new_line[2])/10**18
## Calculate income
date = unix_time_to_date(new_line[4])
if(USD_NOK_dict.get(date)):
USD_NOK_exchange = USD_NOK_dict[date]
result = ethereum_dict[date]*amount*USD_NOK_exchange
total_NOK += result
total_ETH += amount
return total_NOK, total_ETH
def setup():
populate_USD_NOK_conversion('EXR.csv')
populate_bitcoin_price_index('market-price-last-2-years.csv')
populate_electrum_wallet_transactions('electrum-history.csv')
populate_ethereum_price_index('ethereum-usd-price.csv')
def main():
setup()
# This should always be the last day of the year, or the first day of the next year.
final_date_crypto = "2017-12-31"
# This should always be the last working day for the Norwegian exchange,
# or the first working day of the next year.
final_date_USD_NOK = "2017-12-29"
# The USD-NOK conversion for the first day of the year.
    # The Norwegian exchanges might be closed on this day, so it can't be computed directly
start_value = 8.652
income_btc_in_NOK, total_BTC = get_bitcoin_income("2017", start_value)
income_eth_in_NOK, total_ETH = get_ethereum_income('payouts.csv', start_value)
income_in_NOK = income_btc_in_NOK + income_eth_in_NOK
    # Remove this if you're using this script; it is a hard-coded compensation for my taxes.
total_BTC-=0.11362
print("BTC Income:", round(income_btc_in_NOK, 2), "NOK")
print("ETH Income:", round(income_eth_in_NOK, 2), "NOK")
print("Total Income:", round(income_eth_in_NOK + income_btc_in_NOK), "NOK")
print()
capital_btc_in_NOK = USD_NOK_dict[final_date_USD_NOK]*bitcoin_dict[final_date_crypto]*total_BTC
capital_eth_in_NOK = USD_NOK_dict[final_date_USD_NOK]*ethereum_dict[final_date_crypto]*total_ETH
capital_in_NOK = capital_btc_in_NOK + capital_eth_in_NOK
print("BTC Capital:")
print(round(total_BTC, 6), "BTC")
print(round(capital_btc_in_NOK, 2), "NOK")
print()
print("ETH Capital:")
print(round(total_ETH, 4), "ETH")
print(round(capital_eth_in_NOK, 2), "NOK")
print()
print("Total capital:", round(capital_in_NOK), "NOK")
main() | 37.307692 | 120 | 0.65567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,429 | 0.267854 |
6f21c952ba1d6ad55821e054cf4f9e1bcc0cbef5 | 1,222 | py | Python | SymBOP_Analysis/ql_global.py | duttm/Octahedra_Nanoparticle_Project | aebee2859e104071a1a6f5f46b42ddc9bd2fa5ad | [
"MIT"
]
| null | null | null | SymBOP_Analysis/ql_global.py | duttm/Octahedra_Nanoparticle_Project | aebee2859e104071a1a6f5f46b42ddc9bd2fa5ad | [
"MIT"
]
| null | null | null | SymBOP_Analysis/ql_global.py | duttm/Octahedra_Nanoparticle_Project | aebee2859e104071a1a6f5f46b42ddc9bd2fa5ad | [
"MIT"
]
| null | null | null | import numpy as np
import scipy.special as ss
import pathlib
from Particle import Particle
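# Note: ql_global computes the global (system-averaged) bond-orientational
# order parameter Q_l = sqrt( 4*pi/(2l+1) * sum_m |<Q_lm>|^2 ), where <Q_lm>
# is the neighbour-count-weighted average of the per-particle qlm components
# (qlmtilde is used instead when an ideal reference exists for that l).
# Particles without any neighbours are excluded from the average.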
def ql_global(l, particles):
# Keep only particles that have neighbors (this was changed 5/23/2020)
particles = [i for i in particles if len(Particle.data[i].neighs)>0]
neigh_total = sum([len(Particle.data[i].neighs) for i in particles])
if isinstance(l, int):
if len(particles)!=0:
# average slmbar weighted by the number of neighbors
Qlmbar = list(sum([np.array(Particle.data[p].qlmbar[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
Qlmtilde = list(sum([np.array(Particle.data[p].qlmtilde[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
if l in Particle.qlmbar_ideal:
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*np.vdot(np.array(Qlmtilde, dtype=complex), np.array(Qlmtilde, dtype=complex))))
else:
Qlmbar_mag_sq = np.abs(np.vdot(np.array(Qlmbar, dtype=complex), np.array(Qlmbar, dtype=complex)))
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*Qlmbar_mag_sq))
D = np.sqrt(Qlmbar_mag_sq)
else:
Qlmbar = [0]*(2*l+1)
Qlmtilde = [0]*(2*l+1)
Ql = 0.0
return [Ql, Qlmbar, Qlmtilde]
| 20.366667 | 140 | 0.672668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.099836 |
6f22dd259e43cf8dd03f6e436b63e23ee3c3c16a | 133 | py | Python | mycelium/__init__.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
]
| 6 | 2021-05-23T17:36:02.000Z | 2022-01-21T20:34:17.000Z | mycelium/__init__.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
]
| null | null | null | mycelium/__init__.py | suet-lee/mycelium | db83cd3ab00697f28b2def2cebcdef52698fdd92 | [
"Apache-2.0"
]
| 1 | 2021-06-17T20:35:10.000Z | 2021-06-17T20:35:10.000Z | from .switch import EKFSwitch, RelaySwitch, InitialModeSwitch
from .camera_t265 import CameraT265
from .camera_d435 import CameraD435 | 44.333333 | 61 | 0.864662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6f24922c982451aa56d071ba87202ae9a17e9ae3 | 1,030 | py | Python | arsenal/sleep/openfaas/sleep-py/handler.py | nropatas/faasbenchmark | 99f08c70a0ddaa8e9dcadb092b2c395318a6e215 | [
"Apache-2.0"
]
| null | null | null | arsenal/sleep/openfaas/sleep-py/handler.py | nropatas/faasbenchmark | 99f08c70a0ddaa8e9dcadb092b2c395318a6e215 | [
"Apache-2.0"
]
| null | null | null | arsenal/sleep/openfaas/sleep-py/handler.py | nropatas/faasbenchmark | 99f08c70a0ddaa8e9dcadb092b2c395318a6e215 | [
"Apache-2.0"
]
| null | null | null | import os
import time
import datetime
def get_current_epoch():
return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
def get_sleep_parameter(event):
user_input = str(event.query["sleep"])
if not user_input or not user_input.isdigit() or int(user_input) < 0:
return {"error": "invalid sleep parameter"}
return int(user_input)
def run_test(sleep_time):
time.sleep(sleep_time / 1000.0)
def is_warm():
is_warm = os.environ.get("warm") == "true"
os.environ["warm"] = "true"
return is_warm
def handle(event):
start = get_current_epoch()
reused = is_warm()
sleep_time = get_sleep_parameter(event)
if type(sleep_time) != int:
return {
"statusCode": 200,
"body": sleep_time
}
run_test(sleep_time)
duration = (get_current_epoch() - start) * 1000000
return {
"statusCode": 200,
"body": {
"duration": duration,
"reused": reused
}
}
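# Example response shape (illustrative): a repeated ("warm") invocation asked to
# sleep 250 ms returns {"statusCode": 200, "body": {"duration": <elapsed>, "reused": True}}.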
| 23.953488 | 99 | 0.615534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.113592 |
6f24c0d9627e8e593e0f3f03a5c6df58f6f65c2e | 2,922 | py | Python | lib/vapi_cli/users.py | nogayama/vision-tools | f3041b519f30037d5b6390bce36a7f5efd3ed6ae | [
"Apache-2.0"
]
| 15 | 2020-03-22T18:25:27.000Z | 2021-12-03T05:49:32.000Z | lib/vapi_cli/users.py | nogayama/vision-tools | f3041b519f30037d5b6390bce36a7f5efd3ed6ae | [
"Apache-2.0"
]
| 8 | 2020-04-04T18:11:56.000Z | 2021-07-27T18:06:47.000Z | lib/vapi_cli/users.py | nogayama/vision-tools | f3041b519f30037d5b6390bce36a7f5efd3ed6ae | [
"Apache-2.0"
]
| 19 | 2020-03-20T23:36:32.000Z | 2022-01-10T20:38:48.000Z | #!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
#
# Copyright 2019,2020 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IBM_PROLOG_END_TAG
import logging as logger
import sys
import vapi
import vapi_cli.cli_utils as cli_utils
from vapi_cli.cli_utils import reportSuccess, reportApiError, translate_flags
# All of Vision Tools requires python 3.6 due to format string
# Make the check in a common location
if sys.hexversion < 0x03060000:
sys.exit("Python 3.6 or newer is required to run this program.")
token_usage = """
Usage:
users token --user=<user-name> --password=<password>
Where:
--user Required parameter containing the user login name
--password Required parameter containing the user's password
Gets an authentication token for the given user"""
server = None
# --- Token Operation ----------------------------------------------
def token(params):
""" Handles getting an authentication token for a specific user"""
user = params.get("--user", None)
pw = params.get("--password", None)
rsp = server.users.get_token(user, pw)
if rsp is None or rsp.get("result", "fail") == "fail":
reportApiError(server, f"Failed to get token for user '{user}'")
else:
reportSuccess(server, rsp["token"])
cmd_usage = f"""
Usage: users {cli_utils.common_cmd_flags} <operation> [<args>...]
Where:
{cli_utils.common_cmd_flag_descriptions}
<operation> is required and must be one of:
token -- gets an authentication token for the given user
Use 'users <operation> --help' for more information on a specific command."""
usage_stmt = {
"usage": cmd_usage,
"token": token_usage
}
operation_map = {
"token": token
}
def main(params, cmd_flags=None):
global server
args = cli_utils.get_valid_input(usage_stmt, operation_map, argv=params, cmd_flags=cmd_flags)
if args is not None:
# When requesting a token, we need to ignore any existing token info
if args.cmd_params["<operation>"] == "token":
cli_utils.token = ""
try:
server = vapi.connect_to_server(cli_utils.host_name, cli_utils.token)
except Exception as e:
print("Error: Failed to setup server.", file=sys.stderr)
logger.debug(e)
return 1
args.operation(args.op_params)
if __name__ == "__main__":
main(None)
| 29.816327 | 97 | 0.687543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,770 | 0.605749 |
6f25add3846c5ac4302faa8959401e3328e32572 | 2,223 | py | Python | smile_recognition.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
]
| 5 | 2019-05-30T20:15:34.000Z | 2020-04-16T08:21:16.000Z | smile_recognition.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
]
| 5 | 2021-08-25T14:43:34.000Z | 2022-02-10T00:14:09.000Z | smile_recognition.py | audreymychan/djsmile | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | [
"MIT"
]
| null | null | null | # This script loads the pre-trained scaler and models and contains the
# predict_smile() function to take in an image and return smile predictions
import joblib
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
from PIL import Image
import numpy as np
# Set new frame size dimensions
img_width, img_height = (100, 100)
# Scaler and model imports
scaler = joblib.load('./models/scaler.save')
model = load_model('./models/my_model.h5')
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
def predict_smile(gray_img, box, count):
"""Make prediction on a new image whether a person is smiling or not.
Parameters
----------
gray_img : numpy.ndarray of dtype int
Grayscale image in numpy.ndarray of current frame.
box : tuple
(left, top, right, bottom) locating face bounding box in pixel locations.
count : int
Number of faces detected in current frame.
Returns
-------
numpy.ndarray of dtype float
Probabilities of no smile (second number) and smile (first number).
i.e. array([[0.972528 , 0.02747207]], dtype=float32)
"""
# Save a copy of current frame
gray_img = gray_img.reshape(gray_img.shape+(1,)) # (height, width, 1)
array_to_img(gray_img).save(f'./images/temp/current_frame_{count}.jpg')
# Load image
gray_img = Image.open(f'./images/temp/current_frame_{count}.jpg')
# Crop face, resize to 100x100 pixels, and save a copy
face_crop = gray_img.resize((img_width, img_height), box=box)
face_crop.save(f'./images/temp/face_crop_current_frame_{count}.jpg')
# Load image and convert to np.array
face_crop = Image.open(f'./images/temp/face_crop_current_frame_{count}.jpg')
new_face_array = np.array(img_to_array(face_crop)) # (100, 100, 1)
# Reshape
new_face_array = new_face_array.reshape(1, img_width*img_height) # (1, 10_000)
# Transform with pre-trained scaler
new_face_array = scaler.transform(new_face_array)
new_face_array = new_face_array.reshape(1, img_width, img_height, 1) # (1, 100, 100, 1)
return model.predict(new_face_array)
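# Illustrative usage (file name and box coordinates are hypothetical):
#
#     gray = np.array(Image.open('frame.jpg').convert('L'))
#     probs = predict_smile(gray, box=(left, top, right, bottom), count=0)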
| 35.285714 | 92 | 0.706253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,284 | 0.577598 |
6f25c6dda6bae99b736764ebd22f5be07aae919e | 1,054 | py | Python | comicstreamerlib/gui_qt.py | rlugojr/ComicStreamer | 62eb914652695ea41a5e1f0cfbd044cbc6854e84 | [
"Apache-2.0"
]
| 169 | 2015-01-08T03:23:37.000Z | 2022-02-27T22:09:25.000Z | comicstreamerlib/gui_qt.py | gwhittey23/ComicStreamer | 3e0fe2011984cee54197985cb313f5b6864f6f8c | [
"Apache-2.0"
]
| 46 | 2015-01-10T23:47:51.000Z | 2020-05-31T01:04:28.000Z | comicstreamerlib/gui_qt.py | gwhittey23/ComicStreamer | 3e0fe2011984cee54197985cb313f5b6864f6f8c | [
"Apache-2.0"
]
| 94 | 2015-01-26T01:57:52.000Z | 2022-01-25T17:11:31.000Z | import sys
import webbrowser
import os
from comicstreamerlib.folders import AppFolders
from PyQt4 import QtGui,QtCore
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, app):
QtGui.QSystemTrayIcon.__init__(self, icon, None)
self.app = app
self.menu = QtGui.QMenu(None)
exitAction = self.menu.addAction("Exit")
self.setContextMenu(self.menu)
exitAction.triggered.connect( self.quit )
def quit(self):
QtCore.QCoreApplication.quit()
class QtBasedGui():
def __init__(self, apiServer):
self.apiServer = apiServer
self.app = QtGui.QApplication(sys.argv)
pixmap = QtGui.QPixmap(AppFolders.imagePath("trout.png"))
icon = QtGui.QIcon( pixmap.scaled(16,16))
self.trayIcon = SystemTrayIcon(icon,self)
self.trayIcon.show()
def run(self):
try:
self.app.exec_()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
    QtBasedGui(None).run()  # standalone test run without an API server
| 23.422222 | 65 | 0.624288 | 874 | 0.829222 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.025617 |
6f276dd2fdcae04762736c35013f0dd614ff7db4 | 3,892 | py | Python | laserchicken/io/las_handler.py | eEcoLiDAR/eEcoLiDAR | f5c4e772e4893f7242ed0b10aa17ac7e693a55a0 | [
"Apache-2.0"
]
| null | null | null | laserchicken/io/las_handler.py | eEcoLiDAR/eEcoLiDAR | f5c4e772e4893f7242ed0b10aa17ac7e693a55a0 | [
"Apache-2.0"
]
| 104 | 2017-09-07T08:06:49.000Z | 2018-04-16T09:17:18.000Z | laserchicken/io/las_handler.py | eEcoLiDAR/eEcoLiDAR | f5c4e772e4893f7242ed0b10aa17ac7e693a55a0 | [
"Apache-2.0"
]
| 2 | 2017-11-17T17:23:04.000Z | 2017-12-15T07:13:20.000Z | """ IO Handler for LAS (and compressed LAZ) file format """
import laspy
import numpy as np
from laserchicken import keys
from laserchicken.io.base_io_handler import IOHandler
from laserchicken.io.utils import convert_to_short_type, select_valid_attributes
DEFAULT_LAS_ATTRIBUTES = {
'x',
'y',
'z',
'intensity',
'gps_time',
'raw_classification',
}
class LASHandler(IOHandler):
""" Class for IO of point-cloud data in LAS file format """
def read(self, attributes=DEFAULT_LAS_ATTRIBUTES):
"""
Load the points from a LAS(LAZ) file into memory.
:param attributes: list of attributes to read ('all' for all attributes in file)
:return: point cloud data structure
"""
file = laspy.read(self.path)
dtype = file.header.point_format.dtype()
attributes_available = [el if el not in ['X', 'Y', 'Z'] else el.lower()
for el in dtype.fields.keys()]
attributes = select_valid_attributes(attributes_available, attributes)
points = {}
for name in attributes:
if hasattr(file, name):
file_data = getattr(file, name)
data = np.zeros_like(file_data)
data[:] = file_data
points[name] = _get_attribute(data, data.dtype.name)
return {keys.point: points}
def write(self, point_cloud, attributes='all', file_version='1.2', point_format=3):
"""
Write point cloud to a LAS(LAZ) file.
:param point_cloud:
:param attributes: list of attributes to write ('all' for all attributes in point_cloud)
:param file_version:
:param point_format:
:return:
"""
file = laspy.create(point_format=point_format,
file_version=file_version)
points = point_cloud[keys.point]
attributes = select_valid_attributes([attr for attr in points.keys()], attributes)
# NOTE: adding extra dims and assignment should be done in two steps,
# some fields (e.g. raw_classification) are otherwise overwritten
dtype = file.header.point_format.dtype()
for attribute in attributes:
data, type = _get_data_and_type(points[attribute])
type_short = convert_to_short_type(type)
if attribute not in 'xyz':
# x,y,z are not there but file methods can be used to convert coords to int4
if attribute not in dtype.fields:
param = laspy.ExtraBytesParams(name=attribute, type=type)
file.add_extra_dim(param)
file_type_short = convert_to_short_type(getattr(file, attribute).dtype.name)
if not file_type_short == type_short:
raise TypeError('Data type in file does not match the one in point cloud: '
'for {}, {} vs {}'.format(attribute, file_type_short, type_short))
for dim in 'xyz':
data, _ = _get_data_and_type(points[dim])
setattr(file.header, '{}_offset'.format(dim), data.min())
setattr(file.header, '{}_scale'.format(dim), 0.001)
for attribute in attributes:
data, _ = _get_data_and_type(points[attribute])
if data.size == 0:
raise ValueError('Cannot write empty point-cloud!')
else:
setattr(file, attribute, data)
try:
file.write(self.path)
except ValueError as err:
raise ValueError('Error in writing LAS file (file_version {}, point_format_id {}). '
'laspy error below:\n{}'.format(file_version, point_format, err))
def _get_attribute(data, data_type):
return {'type': data_type, 'data': data}
def _get_data_and_type(attribute):
return attribute['data'], attribute['type']
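# Illustrative usage (assumes the IOHandler base class stores the target path
# passed to its constructor):
#
#     handler = LASHandler('points.laz')
#     point_cloud = handler.read(attributes=['x', 'y', 'z', 'intensity'])
#     handler.write(point_cloud, attributes='all')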
| 37.423077 | 98 | 0.610226 | 3,345 | 0.859455 | 0 | 0 | 0 | 0 | 0 | 0 | 1,122 | 0.288284 |
6f29a0478e6fdf417f21eeca439c92961dbbacca | 1,206 | py | Python | prob.py | Y1fanHE/po_with_moead-levy | d0531c9685ea1a09dd074960b51756d8f19a9719 | [
"MIT"
]
| 7 | 2020-09-02T12:40:58.000Z | 2021-09-17T09:39:09.000Z | prob.py | Y1fanHE/po_with_moead-levy | d0531c9685ea1a09dd074960b51756d8f19a9719 | [
"MIT"
]
| null | null | null | prob.py | Y1fanHE/po_with_moead-levy | d0531c9685ea1a09dd074960b51756d8f19a9719 | [
"MIT"
]
| null | null | null | import numpy as np
import pandas as pd
def read_file(prob_num):
df = pd.read_csv("dat/port" + str(prob_num) + ".txt", header=None,
delimiter="\s+", names=range(3)) # info on assets
n = int(df[0][0]) # number of assets
r = df[1: (n + 1)][0].values.reshape(n, 1) # mean of returns
s = df[1: (n + 1)][1].values.reshape(n, 1) # std. of returns
df = df.values
c = np.zeros((n, n))
for it in np.arange(n,len(df)):
i, j = int(df[it][0] - 1), int(df[it][1] - 1)
c[i][j] = c[j][i] = df[it][2] # covariance between asset i, j
return n, r, s, c
def evaluate(x, r, s, c):
M = - np.sum(np.dot(x.T, r)) # obj. 1: -1 * mean as return
V = np.sum(np.dot(x, x.T) * np.dot(s, s.T) * c) # obj. 2: variance as risk
return M, V
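# In matrix form these are the classic Markowitz objectives:
#     M = -x^T r                                      (negated expected portfolio return)
#     V = x^T Sigma x, with Sigma_ij = s_i * s_j * c_ij   (portfolio variance as risk)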
def pf(prob_num):
pf = np.genfromtxt("dat/portef" + str(prob_num) + ".txt") # points on pf
M = []
V = []
for i in range(len(pf)):
M += [pf[i][0]]
V += [pf[i][1]]
return M, V
def set(instance):
n, r, s, c = read_file(instance)
lb, ub = np.zeros((n, 1)), np.ones((n, 1)) # upper and lower bounds
port = evaluate
mp, vp = pf(instance)
return n, r, s, c, lb, ub, port, mp, vp
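# Usage sketch (assuming the helpers above are used as-is; `set` here is this
# module's loader, not the builtin):
#
#     n, r, s, c, lb, ub, port, mp, vp = set(1)    # load instance "port1"
#     x = np.ones((n, 1)) / n                      # equal-weight portfolio
#     neg_return, risk = port(x, r, s, c)          # objectives: -mean return, variance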
| 29.414634 | 78 | 0.529022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.191542 |
6f2a6d704873d5624524e8309be808576dfeefc1 | 277 | py | Python | nmr/testing_PyBMRB.py | jameshtwose/han_jms_collabs | ee5cdb73b3e14e7f1f1e225dbc6a7d7d2b1b5b73 | [
"CC-BY-4.0"
]
| null | null | null | nmr/testing_PyBMRB.py | jameshtwose/han_jms_collabs | ee5cdb73b3e14e7f1f1e225dbc6a7d7d2b1b5b73 | [
"CC-BY-4.0"
]
| null | null | null | nmr/testing_PyBMRB.py | jameshtwose/han_jms_collabs | ee5cdb73b3e14e7f1f1e225dbc6a7d7d2b1b5b73 | [
"CC-BY-4.0"
]
| null | null | null | from pybmrb import Spectra, Histogram
import plotly.io as pio
pio.renderers.default = "browser"
peak_list=Spectra.n15hsqc(bmrb_ids=15060, legend='residue')
peak_list=Spectra.c13hsqc(bmrb_ids=15060, legend='residue')
peak_list=Spectra.tocsy(bmrb_ids=15060, legend='residue') | 27.7 | 59 | 0.801444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.129964 |
6f2c2c62e843e5ddae5061bd51b492b090cca398 | 10,511 | py | Python | parser.py | sberczuk/powerschool-reporter | 2393d9f63ffe643499f6cbf2bf406f3c4d311129 | [
"MIT"
]
| 1 | 2021-03-04T20:11:08.000Z | 2021-03-04T20:11:08.000Z | parser.py | sberczuk/powerschool-reporter | 2393d9f63ffe643499f6cbf2bf406f3c4d311129 | [
"MIT"
]
| null | null | null | parser.py | sberczuk/powerschool-reporter | 2393d9f63ffe643499f6cbf2bf406f3c4d311129 | [
"MIT"
]
| 1 | 2021-03-04T20:11:13.000Z | 2021-03-04T20:11:13.000Z | #!/usr/bin/env python3
import io
import xml.etree.ElementTree as ET
import argparse
ns = {'ns1': 'http://www.sifinfo.org/infrastructure/2.x',
'ns2': 'http://stumo.transcriptcenter.com'}
class StudentInfo:
def __init__(self, first_name, middle_name, last_name, ):
self.last_name = last_name
self.middle_name = middle_name
self.first_name = first_name
class Grade:
"""A Wrapper for a single grade"""
def __init__(self, year, grade_level, term, course_code, title, letter_grade, number_grade, comments, teacher_fn,
teacher_ln, school_name):
self.teacher_ln = teacher_ln
self.teacher_fn = teacher_fn
self.comments = comments
self.number_grade = number_grade
# Special case for the Pandemic special grading
self.letter_grade = letter_grade if letter_grade is not None else "n/a"
self.course_title = title
self.course_code = course_code
self.term = term
self.year = year
self.grade_level = grade_level
self.school = school_name
        if self.comments is not None:
self.comments = self.comments.strip()
def __str__(self) -> str:
return f"{self.year}-{self.term} (code: {self.course_code}) {self.course_title} {self.letter_grade}, {self.number_grade}, {self.comments}, {self.teacher_fn}, {self.teacher_ln} {self.school}"
def pretty_print(self):
return f"###{self.year}-{self.term} {self.school} {self.grade_level} (code: {self.course_code}) {self.course_title} | Instructor: {self.teacher_fn} {self.teacher_ln}\n*Letter Grade*: {self.letter_grade} | *Grade:* {self.number_grade}\nComments: {self.comments} "
def print_description(self):
return f"{self.year}-{self.term} {self.school} {self.grade_level} (code: {self.course_code}) {self.course_title}\nInstructor: {self.teacher_fn} {self.teacher_ln}\nLetter Grade: {self.letter_grade} \nGrade: {self.number_grade}\nComments:{self.comments} "
def print_header(self):
return f"{self.course_title} (code: {self.course_code}) {self.school}\n"
def print_term_grade(self):
return f"<em>{self.year}-{self.term} {self.grade_level}</em><br/>\n<b>{self.letter_grade}</b> / <b>{self.number_grade}</b>"
def reporting_period(self):
return f"{self.year}-{self.term}"
def teacher_name(self):
return f"{self.teacher_fn} {self.teacher_ln}"
def format_comments(self):
        if self.comments is not None:
return self.comments.replace('\n', ' ')
else:
return ''
def process_course(course, year):
title = course.find(".//ns1:CourseTitle", ns).text
course_code = course.find(".//ns1:CourseCode", ns).text
mark_data = course.find(".//ns1:MarkData", ns)
grade_level = course.find(".//ns1:GradeLevelWhenTaken/ns1:Code", ns).text
letter_grade = mark_data.find("ns1:Letter", ns).text
number_grade = mark_data.find("ns1:Percentage", ns).text
comments = mark_data.find("ns1:Narrative", ns).text
# get extended info
extended_info = course.find("ns1:SIF_ExtendedElements", ns)
term = extended_info.find("ns1:SIF_ExtendedElement[@Name='StoreCode']", ns).text
teacher_fn = extended_info.find("ns1:SIF_ExtendedElement[@Name='InstructorFirstName']", ns).text
teacher_ln = extended_info.find("ns1:SIF_ExtendedElement[@Name='InstructorLastName']", ns).text
school_name = extended_info.find("ns1:SIF_ExtendedElement[@Name='SchoolName']", ns).text
return Grade(year, grade_level, term, course_code, title, letter_grade, number_grade, comments, teacher_fn,
teacher_ln, school_name)
# Placeholder for markdown format for a list of grades
# Take the list and sort it with appropriate headers.
# TBD whether metadata needs to be passed in, inferred from the grades, or assumed.
def format_as_markdown(grades):
pass
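# A minimal sketch of what the placeholder above could look like (it reuses the
# Grade.pretty_print() helper defined earlier and is not wired into main()):
#
#     def format_as_markdown(grades):
#         ordered = sorted(grades, key=lambda g: (g.year, g.term, g.course_title))
#         return "\n\n".join(g.pretty_print() for g in ordered)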
def process_data(xmlDataFile):
xml_string = extractValidXML(xmlDataFile)
root = ET.fromstring(xml_string)
ns_name = '{0}StudentDemographicRecord/{0}StudentPersonalData/{0}Name'.format('ns1:')
name = root.find(ns_name, namespaces=ns)
fn = name.find("ns1:FirstName", namespaces=ns).text
mi = name.find("ns1:MiddleName", namespaces=ns).text
ln = name.find("ns1:LastName", namespaces=ns).text
(grades, years) = collect_grades(root)
return (StudentInfo(fn, mi, ln), grades, years)
def generate_year_report(student_info, year, grades_by_course, headers_by_course, schools, terms):
output = io.StringIO()
# Write Report Card Header
output.write(f"<h1> {student_info.first_name} {student_info.middle_name} {student_info.last_name}</h1>\n")
output.write(f"<h1> {year}</h1>\n")
for s in schools:
output.write(f"<h2>{s}</h2>")
for course in grades_by_course.keys():
output.write('<div class="course">\n')
output.write(f"<h2>{headers_by_course.get(course)}</h2>")
course_by_term = organize_by_term(grades_by_course[course])
grades_table = generate_grades_table(course_by_term, terms)
output.write(grades_table)
comments_table = generate_comments_table(course_by_term, terms)
output.write(comments_table)
output.write("</div>\n")
return output.getvalue()
def generate_grades_table(course_by_term, terms):
term_headers = sorted(terms)
with io.StringIO() as output:
output.write("<table class='grades'>")
output.write("<tr>")
for th in term_headers:
output.write(f"<th>{th}</th>")
output.write("</tr>")
output.write("<tr>")
for th in term_headers:
if (th in course_by_term):
g = course_by_term[th]
output.write(f"<td><em>{g.teacher_name()}</em><br/>\n{g.print_term_grade()}</td>")
else:
output.write(f"<td></td>")
output.write("</tr>")
output.write("</table>")
return output.getvalue()
def generate_comments_table(course_by_term, terms):
term_headers = sorted(terms)
with io.StringIO() as output:
output.write("<table class='comments'>")
output.write(f"<tr><th class='cbodyterm'>Term</th><th class='cbodytext'>Comments</th></tr>")
for th in term_headers:
output.write(f"<tr><td class='cbodyterm'>{th}</td>\n")
            if th in course_by_term:
                g = course_by_term[th]
                if g.comments is not None:
output.write(f"<td class='cbodytext'>{g.format_comments()}</td>")
else:
output.write(f"<td class='cbodytext'></td>")
else:
output.write(f"<td class='cbodytext'></td>")
output.write("</table>")
return output.getvalue()
def collect_grades(root):
all_grades = []
all_years = []
findall = root.findall(".//ns1:Term", ns)
for term in findall:
year = term[0][0].text
if year not in all_years:
all_years.append(year)
for courses in term.iter("{http://www.sifinfo.org/infrastructure/2.x}Courses"):
for course in courses:
grade = process_course(course, year)
all_grades.append(grade)
return (all_grades, all_years)
def organize_by_term(grades):
grade_list = sorted(grades, key=lambda gg: gg.term)
grades_by_term = dict()
for grade in grade_list:
term = grade.term
if term not in grades_by_term:
grades_by_term[term] = []
grades_by_term[term] = grade
return grades_by_term
def organize_grades(all_grades):
allCoursesByName = set()
grades_by_course = dict()
grades_by_period = dict()
header_by_course = dict()
for grade in all_grades:
period = grade.reporting_period()
allCoursesByName.add(grade.course_title)
course_code = grade.course_code
if course_code not in grades_by_course:
grades_by_course[course_code] = []
if period not in grades_by_period:
grades_by_period[period] = []
grades_by_period[period].append(grade)
grades_by_course[course_code].append(grade)
header_by_course[course_code] = grade.print_header()
return (grades_by_course, grades_by_period, header_by_course)
def extractValidXML(inFile):
with open(inFile, 'r') as f:
return parse_file(f)
# concat all of the XML lines in the file, then return it
# Skip all up to the start of the XML
def parse_file(f):
result = ''
skip = True
for line in f:
if line.startswith('<?xml version="1.0" '):
skip = False
if not skip:
# This is a known issue: last line being incomplete
if (line.startswith('</StudentRec') and line != '</StudentRecordExchangeData>'):
line = '</StudentRecordExchangeData>'
result = result + line
return result
def generate_html_file(file_name, body_text):
css_text = ''
with open('reportCard.css') as css_file:
css_text = css_file.read()
with open(file_name, 'w') as f:
f.write("<html>\n<head>\n")
f.write(f"\n<style>{css_text}</style>\n")
f.write("</head>\n<body>\n")
f.write(body_text)
f.write("\n</body>\n</html>")
if __name__ == "__main__":
import sys
parser = argparse.ArgumentParser(description='Report Card Generator.')
parser.add_argument('--output_basename', action='store',
default='report_card',
help='Output file to report results to (default: standard out)')
# First arg is the data file
parser.add_argument('data_file')
args = parser.parse_args()
basename = args.output_basename
print("output = ", basename)
print("parsing ", args.data_file)
valid_xml = extractValidXML(args.data_file)
(student_info, grades, years) = process_data(args.data_file)
years.sort()
for year in years:
(grades_by_course, grades_by_period, headers_by_course) = organize_grades(
[a for a in grades if (a.year == year)])
print("*******************", year, "***************")
schools = [g.school for g in grades if (g.year == year)]
terms = [g.term for g in grades if (g.year == year)]
        report_text = generate_year_report(student_info, year, grades_by_course, headers_by_course, set(schools), set(terms))
file_name = f"{basename}-{year}.html"
generate_html_file(file_name, report_text)
| 38.083333 | 271 | 0.641328 | 2,389 | 0.227286 | 0 | 0 | 0 | 0 | 0 | 0 | 3,157 | 0.300352 |
6f2de6790116bc6ef41091db2832890bbce2457a | 2,623 | py | Python | eunite/eunite_data.py | jiasudemotuohe/deep_learning | 44eb14d91b6b9ca2092361918a1bcaa73786f78e | [
"MIT"
]
| null | null | null | eunite/eunite_data.py | jiasudemotuohe/deep_learning | 44eb14d91b6b9ca2092361918a1bcaa73786f78e | [
"MIT"
]
| null | null | null | eunite/eunite_data.py | jiasudemotuohe/deep_learning | 44eb14d91b6b9ca2092361918a1bcaa73786f78e | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020-04-11 12:34
# @Author : speeding_moto
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
EUNITE_PATH = "dataset/eunite.xlsx"
PARSE_TABLE_NAME = "mainData"
def load_eunite_data():
"""
    return the generated load data, including all the features we handle
"""
data = open_file()
X, Y = generate_features(data)
return X.values, Y.values
def load_eunite_train_data():
X, Y = load_eunite_data()
trains_test_rate = int(len(X) * 0.7)
train_x = X[0: trains_test_rate]
train_y = Y[0: trains_test_rate]
test_x = X[trains_test_rate:]
test_y = Y[trains_test_rate:]
return train_x, train_y, test_x, test_y
def generate_features(df):
"""
    parse the data; the class-valued columns are converted to one-hot vectors for the later calculations
"""
months = df["Month"]
days = df["Day"]
one_hot_months = cast_to_one_hot(months, n_classes=12)
days = cast_to_one_hot(days, n_classes=31)
one_hot_months = pd.DataFrame(one_hot_months)
days = pd.DataFrame(days)
df = pd.merge(left=df, right=one_hot_months, left_index=True, right_index=True)
df = pd.merge(left=df, right=days, left_index=True, right_index=True)
y = df['Max Load']
    # normalize the temperature data before merging it back into the feature frame
temperature = normalization(df['Temp'].values)
temperature = pd.DataFrame(temperature)
df = pd.merge(left=df, right=temperature, left_index=True, right_index=True)
drop_columns = ["ID", "Month", "Day", "Year", "Max Load", "Temp"]
df.drop(drop_columns, axis=1, inplace=True)
print(df[0:10], "\n", y[0])
return df, y
def normalization(data):
return (data - np.mean(data)) / np.max(np.abs(data))
def cast_to_one_hot(data, n_classes):
    """
    cast 1-based class labels to one-hot vectors
    """
    one_hot = np.eye(n_classes)[data - 1]
    return one_hot
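# For example (sketch): with n_classes=3, labels [1, 3] are mapped to the identity
# rows [[1, 0, 0], [0, 0, 1]].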
def show_month_temperature_load_image(df):
plt.title("relation of temperature and load")
max_load = df["Max Load"]
temp = df['Temp'] * 15
plt.plot(max_load)
plt.plot(temp)
plt.xlabel('time')
plt.annotate('temperature', xy=[200, 200], xytext=(300, 200))
plt.annotate('load', xy=[200, 600], xytext=(200, 800))
plt.show()
def open_file():
"""
open the eunite load excel file to return
"""
xlsx_file = pd.ExcelFile(EUNITE_PATH)
return xlsx_file.parse(PARSE_TABLE_NAME)
if __name__ == '__main__':
df = open_file()
show_month_temperature_load_image(df)
x, y = load_eunite_data()
print(x.shape)
| 22.808696 | 91 | 0.661456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 621 | 0.236752 |
6f2ef602fc37c19ef3635c7ccba25fb1c352192a | 4,828 | py | Python | tests/test_fibsem.py | DeMarcoLab/piescope | ea7acf5b198b91e4923097711d55ca038763eba2 | [
"MIT"
]
| 4 | 2019-06-07T07:28:48.000Z | 2022-02-23T23:02:08.000Z | tests/test_fibsem.py | DeMarcoLab/PIEScope | ea7acf5b198b91e4923097711d55ca038763eba2 | [
"MIT"
]
| 44 | 2019-06-09T14:32:16.000Z | 2022-03-25T06:04:20.000Z | tests/test_fibsem.py | DeMarcoLab/piescope | ea7acf5b198b91e4923097711d55ca038763eba2 | [
"MIT"
]
| 3 | 2019-06-07T07:31:09.000Z | 2021-03-01T10:47:24.000Z | import numpy as np
import pytest
from piescope.data.mocktypes import MockAdornedImage
import piescope.fibsem
autoscript = pytest.importorskip(
"autoscript_sdb_microscope_client", reason="Autoscript is not available."
)
try:
from autoscript_sdb_microscope_client import SdbMicroscopeClient
microscope = SdbMicroscopeClient()
microscope.connect("localhost")
except Exception:
pytest.skip("AutoScript cannot connect to localhost, skipping all AutoScript tests.",
allow_module_level=True)
def test_initialize():
"""Test connecting to the microscope offline with localhost."""
microscope = piescope.fibsem.initialize("localhost")
@pytest.fixture
def microscope():
from autoscript_sdb_microscope_client import SdbMicroscopeClient
microscope = SdbMicroscopeClient()
microscope.connect("localhost")
return microscope
@pytest.fixture
def image():
image_array = np.random.random((10, 10))
return MockAdornedImage(image_array, pixelsize_x=1e-6, pixelsize_y=1e-6)
def test_move_to_light_microscope(microscope):
original_position = microscope.specimen.stage.current_position
final_position = piescope.fibsem.move_to_light_microscope(microscope)
assert np.isclose(final_position.x, original_position.x + 50e-3, atol=1e-7)
assert np.isclose(final_position.y, original_position.y + 0.)
assert np.isclose(final_position.z, original_position.z)
assert np.isclose(final_position.r, original_position.r)
assert np.isclose(final_position.t, original_position.t)
def test_move_to_electron_microscope(microscope):
original_position = microscope.specimen.stage.current_position
final_position = piescope.fibsem.move_to_electron_microscope(microscope)
assert np.isclose(final_position.x, original_position.x - 50e-3, atol=1e-7)
assert np.isclose(final_position.y, original_position.y - 0.)
assert np.isclose(final_position.z, original_position.z)
assert np.isclose(final_position.r, original_position.r)
assert np.isclose(final_position.t, original_position.t)
def test_new_ion_image(microscope):
result = piescope.fibsem.new_ion_image(microscope)
assert microscope.imaging.get_active_view() == 2
assert result.data.shape == (884, 1024)
def test_new_electron_image(microscope):
result = piescope.fibsem.new_electron_image(microscope)
assert microscope.imaging.get_active_view() == 1
assert result.data.shape == (884, 1024)
def test_last_ion_image(microscope):
result = piescope.fibsem.last_ion_image(microscope)
assert microscope.imaging.get_active_view() == 2
assert result.data.shape == (884, 1024)
def test_last_electron_image(microscope):
result = piescope.fibsem.last_electron_image(microscope)
assert microscope.imaging.get_active_view() == 1
assert result.data.shape == (884, 1024)
def test_create_rectangular_pattern(microscope, image):
x0 = 2
x1 = 8
y0 = 3
y1 = 7
depth = 1e-6
output = piescope.fibsem.create_rectangular_pattern(
microscope, image, x0, x1, y0, y1, depth)
expected_center_x = 0
expected_center_y = 0
expected_width = 6e-6
expected_height = 4e-6
assert np.isclose(output.center_x, expected_center_x)
assert np.isclose(output.center_y, expected_center_y)
assert np.isclose(output.width, expected_width)
assert np.isclose(output.height, expected_height)
assert np.isclose(output.depth, depth) # depth is unchanged
assert np.isclose(output.rotation, 0) # no rotation by befault
def test_empty_rectangular_pattern(microscope, image):
x0 = None
x1 = None
y0 = 3
y1 = 7
depth = 1e-6
output = piescope.fibsem.create_rectangular_pattern(
microscope, image, x0, x1, y0, y1, depth)
assert output is None
@pytest.mark.parametrize(
"coord, expected_output",
[
([5, 5], [0, 0]),
([6, 5], [1e-6, 0]),
([5, 4], [0, 1e-6]),
([6, 4], [1e-6, 1e-6]),
([4, 6], [-1e-6, -1e-6]),
([4, 4], [-1e-6, 1e-6]),
([6, 6], [1e-6, -1e-6]),
],
)
def test_pixel_to_realspace_coordinate(image, coord, expected_output):
result = piescope.fibsem.pixel_to_realspace_coordinate(coord, image)
assert np.allclose(np.array(result), np.array(expected_output))
def test_autocontrast(microscope):
# This test checks autocontrast does not hit an error
piescope.fibsem.autocontrast(microscope)
@pytest.mark.parametrize(
"resolution",
[
("1536x1024"),
("3072x2048"),
("6144x4096"),
("768x512"),
],
)
def test_update_camera_settings(resolution):
dwell_time = 1e-7
output = piescope.fibsem.update_camera_settings(dwell_time, resolution)
assert output.dwell_time == dwell_time
assert output.resolution == resolution
| 31.763158 | 89 | 0.719553 | 0 | 0 | 0 | 0 | 1,225 | 0.253728 | 0 | 0 | 407 | 0.0843 |
6f2f4a5690de443a3e4f39e964bc36f35fd2bc86 | 8,206 | py | Python | newnew.py | jennycs005/Skyscraper-App | 53d69e005bec17a033be6ea1274e8f7372ed8b28 | [
"MIT"
]
| null | null | null | newnew.py | jennycs005/Skyscraper-App | 53d69e005bec17a033be6ea1274e8f7372ed8b28 | [
"MIT"
]
| null | null | null | newnew.py | jennycs005/Skyscraper-App | 53d69e005bec17a033be6ea1274e8f7372ed8b28 | [
"MIT"
]
| null | null | null | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import csv
import numpy as np
import pydeck as pdk
from PIL import Image
def scatterplot():
    skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
    # convert the completion year to numbers and strip the trailing " m" from the heights
    completion_years = pd.to_numeric(skyscrapers_data.COMPLETION)
    meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float)
    plt.xlabel("Completion Year", fontsize=10)
    plt.ylabel("Meters", fontsize=10)
    plt.title("Height & Numbers along with completion year", fontsize=13)
    plt.scatter(completion_years, meters, alpha=0.3, marker=".", color="cornflowerblue")
    plt.show()
    #return plt
# the former rank_map function, renamed to whole_map() to distinguish it from rank_map() below
def whole_map():
    skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
    sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "Latitude", "Longitude"])
    sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"}, inplace=True)
    st.map(sky_df)
# new rank_map() function: show the skyscrapers selected by rank
def rank_map(select_rank):
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "CITY", "Latitude", "Longitude"])
sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"}, inplace=True)
select_rank_max = select_rank + 19
rank_df = sky_df[(sky_df['RANK'] >= select_rank) & (sky_df['RANK'] <= select_rank_max)]
    # show the data table in the sidebar
    rank_df_show = pd.DataFrame(rank_df, columns=['CITY', 'lon', 'lat'])
    st.sidebar.table(rank_df_show)
    # draw the map
st.pydeck_chart(pdk.Deck(
map_style = 'mapbox://styles/mapbox/light-v9',
initial_view_state=pdk.ViewState(
latitude=rank_df['lat'].mean(),
longitude=rank_df['lon'].mean(),
zoom=1,
pitch=0
),
layers = [
pdk.Layer(
'HexagonLayer',
data=rank_df,
get_position = '[lon, lat]',
radius = 200000,
elevation_scale = 10000,
elevation_range = [400,1000],
pickable = True,
extruded = True,
),
pdk.Layer(
'ScatterplotLayer',
data=rank_df,
get_position='[lon, lat]',
get_color = '[200, 30, 0, 160]',
get_radius = 200000,
),
],
))
# line chart of the average height up to the selected completion year
def average_height_line_chart(select_year):
    average_height_df = pd.DataFrame(columns=['Year', 'AverageHeight'])
    skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
    year_df = pd.DataFrame(skyscrapers_data, columns=["COMPLETION", "Meters"])
    year_df.COMPLETION = pd.to_numeric(year_df.COMPLETION)
    year_df.Meters = year_df.Meters.str.replace(r'\s+m', '').astype(float)
    year_df = year_df[year_df['COMPLETION'] <= select_year]
    year_df.sort_values(by=['COMPLETION'], ascending=True, inplace=True)
    for year in range(1931, select_year):  # iterate up to the selected year (was hard-coded to 1991 before, which capped the chart)
        mean_height = (year_df[['Meters']][year_df['COMPLETION'] <= year]).mean()
        a = {'Year': year, 'AverageHeight': mean_height}
        average_height_df = average_height_df.append(a, ignore_index=True)
plt.xlabel("Completion Year",fontsize=10)
plt.ylabel("Average Height",fontsize=10)
plt.title("Average Height along with completion year",fontsize=13)
plt.plot(average_height_df.Year, average_height_df.AverageHeight)
plt.show()
def statisticchart(selection):
if selection == "By Function":
fp = open('Skyscrapers2021.csv', 'r')
reader = csv.reader(fp)
count = 0
d = {'office': 0, 'hotel': 0, 'residential': 0, 'hotel / office': 0, 'residential / office': 0,
'multifunction': 0}
for row in reader:
if count > 0:
label = row[12]
if label == 'office':
d['office'] += 1
elif label == 'hotel':
d['hotel'] += 1
elif label == 'residential':
d['residential'] += 1
elif label == 'hotel / office':
d['hotel / office'] += 1
elif label == 'residential / office':
d['residential / office'] += 1
else:
d['multifunction'] += 1
count += 1
label = []
values = []
for key in d:
label.append(key)
values.append(d[key])
EXPLODE_VALUE = 0.1
max_percentage = max(d.values())
max_percentage_index = values.index(max_percentage)
explode_values = [0] * len(label)
explode_values[max_percentage_index] = EXPLODE_VALUE
colors = ["skyblue", "cadetblue", "cornflowerblue","powderblue","steelblue","lightslategray"]
plt.pie(values, labels=label, colors=colors,explode=explode_values, autopct='%1.1f%%', startangle=90,
textprops={'fontsize': 10})
plt.show()
plt.rcParams.update({"font.size": 7})
plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0))
plt.show()
else:
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
material_description = {}
for i in skyscrapers_data["MATERIAL"]:
if i in material_description:
material_description[i] += 1
else:
material_description[i] = 1
material_Percentage_Value = material_description.values()
labels = material_description.keys()
mfunction = [x for x in material_Percentage_Value]
st.set_option('deprecation.showPyplotGlobalUse', False)
EXPLODE_VALUE = 0.1
max_percentage = max(material_Percentage_Value)
max_percentage_index = mfunction.index(max_percentage)
explode_values = [0] * len(labels)
explode_values[max_percentage_index] = EXPLODE_VALUE
colors = ["tan", "peru", "orange", "gold"]
plt.pie(mfunction, labels=labels, colors=colors, explode=explode_values, autopct='%1.1f%%', startangle=90,
textprops={'fontsize': 10})
plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0))
plt.show()
return plt
def main():
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
img = Image.open("photo.jpg")
st.image(img, width=700)
st.title("Top 100 Skyscrapers around the world!")
if st.checkbox("Show DataFrame"):
st.dataframe(skyscrapers_data, width=700, height=300)
if st.checkbox("Show all 100 Skyscrapers in the map"):
whole_map()
    # select the rank range in the sidebar
    rank = st.sidebar.selectbox('Select rank:', ('1~20', '21~40', '41~60', '61~80', '81~100'))
    rank_list = {'1~20': 1, '21~40': 21, '41~60': 41, '61~80': 61, '81~100': 81}
    select_rank = rank_list[rank]
    # draw the map for the selected rank range
    st.write('Skyscrapers Rank ' + str(select_rank) + ' ~ ' + str(select_rank+19))
    rank_map(select_rank)
    # suppress an unimportant deprecation warning
    st.set_option('deprecation.showPyplotGlobalUse', False)
    st.pyplot(scatterplot())
    # insert a pivot table
    skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
    skyscrapers_data.Meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float)
    tt = pd.pivot_table(skyscrapers_data, index=['CITY', 'COMPLETION', 'MATERIAL'], values=['RANK', 'Meters'])
    st.dataframe(tt)
    # select the year in the sidebar
    select_year = st.sidebar.slider("Select years", 1931, 2020)
    # draw the line chart for the selected year
st.pyplot(average_height_line_chart(select_year))
selection = st.sidebar.selectbox("Select an option: ",("By Function", "By Material"))
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(statisticchart(selection))
main()
| 40.029268 | 115 | 0.598343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,118 | 0.25071 |
6f2f4c53b7a08acbd2a5aec32456145e78be64d9 | 4,746 | py | Python | cifar_train.py | usumfabricae/sagemaker-multi-model-endpoint-tensorflow-computer-vision | 74a97ecc2fa9bf76c5543dfe23373a6c69c61647 | [
"MIT-0"
]
| 4 | 2021-05-30T22:15:34.000Z | 2022-03-12T23:01:36.000Z | cifar_train.py | usumfabricae/sagemaker-multi-model-endpoint-tensorflow-computer-vision | 74a97ecc2fa9bf76c5543dfe23373a6c69c61647 | [
"MIT-0"
]
| null | null | null | cifar_train.py | usumfabricae/sagemaker-multi-model-endpoint-tensorflow-computer-vision | 74a97ecc2fa9bf76c5543dfe23373a6c69c61647 | [
"MIT-0"
]
| 3 | 2021-06-08T12:04:43.000Z | 2021-06-12T13:44:48.000Z | from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras import utils
import tensorflow as tf
import numpy as np
import argparse
import logging
import os
# Set Log Level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Seed for Reproducability
SEED = 123
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Setup Logger
logger = logging.getLogger('sagemaker')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def parse_args():
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--data', type=str, default=os.environ.get('SM_CHANNEL_DATA'))
parser.add_argument('--output', type=str, default=os.environ.get('SM_CHANNEL_OUTPUT'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
def get_train_data(train_dir):
X_train = np.load(os.path.join(train_dir, 'X_train.npy'))
y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
logger.info(f'X_train: {X_train.shape} | y_train: {y_train.shape}')
return X_train, y_train
def get_validation_data(val_dir):
X_validation = np.load(os.path.join(val_dir, 'X_validation.npy'))
y_validation = np.load(os.path.join(val_dir, 'y_validation.npy'))
logger.info(f'X_validation: {X_validation.shape} | y_validation: {y_validation.shape}')
return X_validation, y_validation
def get_test_data(test_dir):
X_test = np.load(os.path.join(test_dir, 'X_test.npy'))
y_test = np.load(os.path.join(test_dir, 'y_test.npy'))
logger.info(f'X_test: {X_test.shape} | y_test: {y_test.shape}')
return X_test, y_test
if __name__ == '__main__':
logger.info(f'[Using TensorFlow version: {tf.__version__}]')
DEVICE = '/cpu:0'
args, _ = parse_args()
epochs = args.epochs
# Load train, validation and test sets from S3
X_train, y_train = get_train_data(args.train)
X_validation, y_validation = get_validation_data(args.val)
X_test, y_test = get_test_data(args.test)
with tf.device(DEVICE):
# Data Augmentation
TRAIN_BATCH_SIZE = 32
data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
train_iterator = data_generator.flow(X_train, y_train, batch_size=TRAIN_BATCH_SIZE)
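        # the generator randomly shifts images by up to 10% horizontally/vertically and
        # mirrors them left-right, yielding augmented batches of TRAIN_BATCH_SIZE on the fly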
# Define Model Architecture
model = Sequential()
# CONVOLUTIONAL LAYER 1
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
        # CONVOLUTIONAL LAYER 2
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
# CONVOLUTIONAL LAYER 3
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
# FULLY CONNECTED LAYER
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.summary()
# Compile Model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train Model
BATCH_SIZE = 32
STEPS_PER_EPOCH = int(X_train.shape[0]/TRAIN_BATCH_SIZE)
model.fit(train_iterator,
steps_per_epoch=STEPS_PER_EPOCH,
batch_size=BATCH_SIZE,
epochs=epochs,
validation_data=(X_validation, y_validation),
callbacks=[],
verbose=2,
shuffle=True)
# Evaluate on Test Set
result = model.evaluate(X_test, y_test, verbose=1)
print(f'Test Accuracy: {result[1]}')
# Save Model
model.save(f'{args.model_dir}/1')
| 37.370079 | 112 | 0.676991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,066 | 0.22461 |
6f2fda5d1a7f7912eef13fc0ff8b8f413ac5c9a7 | 1,373 | py | Python | corehq/form_processor/migrations/0049_case_attachment_props.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
]
| 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/form_processor/migrations/0049_case_attachment_props.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
]
| 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/form_processor/migrations/0049_case_attachment_props.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
]
| 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0048_attachment_content_length_blob_id'),
]
operations = [
migrations.AddField(
model_name='xformattachmentsql',
name='properties',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='attachment_from',
field=models.TextField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='properties',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='attachment_src',
field=models.TextField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='identifier',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
| 29.212766 | 69 | 0.600874 | 1,201 | 0.874727 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.17917 |
6f30daadb871f9a5d1c444d73777bde40a45df2e | 8,658 | py | Python | src/utils/es_async.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
]
| null | null | null | src/utils/es_async.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
]
| null | null | null | src/utils/es_async.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
]
| 1 | 2018-11-17T08:53:06.000Z | 2018-11-17T08:53:06.000Z | import re
import json
import tornado.web
import tornado.httpclient
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
import tornadoes
from utils.es import (ESQuery, ESQueryBuilder,
MGQueryError, ElasticSearchException,
ES_INDEX_NAME_ALL)
from utils.dotfield import parse_dot_fields
from config import ES_HOST
class ESQueryAsync(ESQuery):
es_connection = tornadoes.ESConnection(ES_HOST.split(':')[0])
es_connection.httprequest_kwargs = {'request_timeout': 120.} # increase default timeout from 20 to 120s
def _search_async(self, q, species='all', callback=None):
self._set_index(species)
self.es_connection.search(callback=callback,
index=self._index,
type=self._doc_type,
source=q)
def _msearch_async(self, q, species='all', callback=None):
self._set_index(species)
path = '/'.join((self.es_connection.url, self._index, self._doc_type, '_msearch'))
request_http = tornadoes.HTTPRequest(path, method="POST", body=q,
**self.es_connection.httprequest_kwargs)
self.es_connection.client.fetch(request=request_http, callback=callback)
self._index = ES_INDEX_NAME_ALL # reset self._index
def get_gene2(self, geneid, fields='all', **kwargs):
'''for /gene/<geneid>'''
callback = kwargs.pop('callback', None)
is_async = callback is not None
options = self._get_cleaned_query_options(fields, kwargs)
qbdr = ESQueryBuilder(**options.kwargs)
_q = qbdr.build_id_query(geneid, options.scopes)
if options.rawquery:
if is_async:
callback(_q)
return
else:
return _q
if is_async:
def inner_callback(response):
res = json.loads(response.body)
if not options.raw:
res = self._cleaned_res(res, empty=None, single_hit=True, dotfield=options.dotfield)
callback(res)
self._search_async(_q, species=options.kwargs['species'], callback=inner_callback)
return
else:
res = self._search(_q)
if not options.raw:
res = self._cleaned_res(res, empty=None, single_hit=True, dotfield=options.dotfield)
return res
def _normalize_msearch_res(self, res, geneid_list, options):
assert len(res) == len(geneid_list)
_res = []
for i in range(len(res)):
hits = res[i]
qterm = geneid_list[i]
hits = self._cleaned_res(hits, empty=[], single_hit=False, dotfield=options.dotfield)
if len(hits) == 0:
_res.append({u'query': qterm,
u'notfound': True})
elif 'error' in hits:
_res.append({u'query': qterm,
u'error': True})
else:
for hit in hits:
hit[u'query'] = qterm
_res.append(hit)
return _res
def mget_gene2(self, geneid_list, fields=None, **kwargs):
'''for /query post request'''
callback = kwargs.pop('callback', None)
is_async = callback is not None
options = self._get_cleaned_query_options(fields, kwargs)
qbdr = ESQueryBuilder(**options.kwargs)
try:
_q = qbdr.build_multiple_id_query(geneid_list, options.scopes)
except MGQueryError as err:
res = {'success': False,
'error': err.message}
if is_async:
callback(res)
return
else:
return res
if options.rawquery:
if is_async:
callback(_q)
return
else:
return _q
if is_async:
def inner_callback(response):
if response.code == 599 and response.body is None:
res = {'success': False,
'error': 'timeout'}
else:
res = json.loads(response.body)['responses']
if not options.raw:
res = self._normalize_msearch_res(res, geneid_list, options)
callback(res)
self._msearch_async(_q, species=kwargs['species'], callback=inner_callback)
return
else:
res = self._msearch(_q, kwargs['species'])['responses']
return res if options.raw else self._normalize_msearch_res(res, geneid_list, options)
@staticmethod
def _normalize_query_res(res, options):
if "error" in res:
return {'success': False,
'error': "invalid query term."}
_res = res['hits']
_res['took'] = res['took']
if "facets" in res:
_res['facets'] = res['facets']
for v in _res['hits']:
del v['_type']
del v['_index']
for attr in ['fields', '_source']:
if attr in v:
v.update(v[attr])
del v[attr]
break
if not options.dotfield:
parse_dot_fields(v)
res = _res
return res
def query(self, q, fields=None, **kwargs):
'''for /query?q=<query>'''
callback = kwargs.pop('callback', None)
is_async = callback is not None
options = self._get_cleaned_query_options(fields, kwargs)
qbdr = ESQueryBuilder(**options.kwargs)
q = re.sub(u'[\t\n\x0b\x0c\r\x00]+', ' ', q)
q = q.strip()
_q = None
# Check if special interval query pattern exists
interval_query = self._parse_interval_query(q)
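        # e.g. a genomic interval query such as "chr1:1000-100000"; the exact accepted
        # pattern is defined by _parse_interval_query (inherited from ESQuery)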
try:
if interval_query:
                # a "taxid" should also be passed along with the interval.
if qbdr.species != 'all':
qbdr.species = [qbdr.species[0]]
_q = qbdr.build_genomic_pos_query(**interval_query)
else:
res = {'success': False,
'error': 'genomic interval query cannot be combined with "species=all" parameter. Specify a single species.'}
if is_async:
callback(res)
return
else:
return res
# Check if fielded/boolean query, excluding special goid query
# raw_string_query should be checked ahead of wildcard query, as raw_string may contain wildcard as well
# e.g., a query "symbol:CDK?", should be treated as raw_string_query.
elif self._is_raw_string_query(q) and not q.lower().startswith('go:'):
_q = qbdr.build(q, mode=3) # raw string query
elif self._is_wildcard_query(q):
_q = qbdr.build(q, mode=2) # wildcard query
else:
# normal text query
_q = qbdr.build(q, mode=1)
except MGQueryError as err:
res = {'success': False,
'error': err.message}
if is_async:
callback(res)
return
else:
return res
if _q:
if options.rawquery:
if is_async:
callback(_q)
return
else:
return _q
if is_async:
def inner_callback(response):
res = json.loads(response.body)
if not options.raw:
res = self._normalize_query_res(res, options)
callback(res)
self._search_async(_q, species=kwargs['species'], callback=inner_callback)
return
else:
try:
res = self._search(_q, species=kwargs['species'])
if not options.raw:
res = self._normalize_query_res(res, options)
except ElasticSearchException as err:
err_msg = err.message if options.raw else "invalid query term."
res = {'success': False,
'error': err_msg}
else:
res = {'success': False, 'error': "Invalid query. Please check parameters."}
if is_async:
callback(res)
return
else:
return res
| 38.825112 | 136 | 0.524371 | 8,258 | 0.9538 | 0 | 0 | 678 | 0.078309 | 0 | 0 | 1,200 | 0.1386 |
6f31322afdaea5a169b7473328dfc029ea716e21 | 10,203 | py | Python | processviz/test.py | jurgendn/processviz | 82808a92662962f04c48673c9cf159d7bc904ff7 | [
"BSD-3-Clause"
]
| null | null | null | processviz/test.py | jurgendn/processviz | 82808a92662962f04c48673c9cf159d7bc904ff7 | [
"BSD-3-Clause"
]
| null | null | null | processviz/test.py | jurgendn/processviz | 82808a92662962f04c48673c9cf159d7bc904ff7 | [
"BSD-3-Clause"
]
| 2 | 2020-03-19T11:14:13.000Z | 2021-08-14T14:24:08.000Z | """
This library was written for the course `Stochastic models and applications`.
It uses the libraries `networkx, pandas, numpy, matplotlib`.
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
import pandas as pd
def _gcd(a, b):
if a == 0:
return b
return _gcd(b % a, a)
def gcd(arr):
if len(arr) == 0:
return 0
if (len(arr) == 1):
return arr[0]
t = arr[0]
for i in range(len(arr)):
t = _gcd(t, arr[i])
return t
class MarkovChain:
"""
    Constructor function: generate a blank instance.
    There are two ways to build the chain:
    - From a csv file:
      use from_file
    - From keyboard input:
      use from_stdin
"""
def __init__(self):
self.data = None
self.state = None
self.struct = None
def from_stdin(self, state=None, data=None, pi=None):
        if state is None or data is None:
return "Nothing is given"
else:
self.P = data
self.pi = pi
self.data = self.P
self.state = state
self.struct = self.__generate_struct__()
def from_file(self, path='input.csv'):
data = pd.read_csv(path)
matrix = pd.DataFrame(data)
data = matrix.values.tolist()
self.pi = data[0]
self.state = matrix.columns
self.P = data[1:]
self.data = self.P
self.struct = self.__generate_struct__()
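    # Usage sketch (assuming a CSV whose header row names the states, whose first
    # data row is the initial distribution pi, and whose remaining rows form the
    # transition matrix P):
    #
    #     mc = MarkovChain()
    #     mc.from_file('input.csv')      # or: mc.from_stdin(state, data, pi)
    #     mc.generate_graph(n=1)
    #     print(mc.get_steady_state())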
"""
    Generate the structure of the graph.
    Each entry of the structure currently looks like:
    ['vertex 1', 'vertex 2', {'label': label}]
"""
def __generate_struct__(self):
struct = []
for i in range(len(self.data)):
for j in range(len(self.data)):
if self.data[i][j] > 0:
struct.append([self.state[i], self.state[j],
{'label': self.data[i][j]}])
return struct
"""
    Generate the n-step transition probability matrix of the process
"""
def matrix_at(self, n):
self.data = np.matrix.round(np.linalg.matrix_power(self.P, n), 3)
self.struct = self.__generate_struct__()
"""
    Generate the graph; the rendered image is saved to the img directory
"""
def __get_state_vector__(self, n):
self.matrix_at(n)
self.state_vector = np.matmul(self.pi, self.data)
def __get_state_track__(self, n):
state = np.empty(shape=(len(self.pi), 1))
state = state.tolist()
steps = []
for i in range(n):
steps.append(i+1)
self.__get_state_vector__(i)
state.append(self.state_vector)
state = np.transpose(state)
return state.tolist(), steps
def generate_state_graph(self, n):
        if self.pi is None:
return "Not found origin state"
else:
state, steps = self.__get_state_track__(n)
legend = self.state
for i in range(len(self.pi)):
plt.plot(steps, state[i][1:])
plt.legend(legend, loc='best')
plt.title("Distribution state vector through time")
plt.xlabel("Steps")
plt.ylabel("Probability")
plt.savefig('img/state_vector.svg', format='svg', dpi=1200)
plt.show()
def generate_graph(self, n=1):
if self.state is None:
return "Graph is empty. \n Nothing to show"
else:
self.matrix_at(n)
self = nx.drawing.nx_agraph.to_agraph(nx.DiGraph(self.struct))
self.layout('dot')
self.node_attr.update(color='red', height=0.5,
width=0.5, fontname="Calibri", fontsize=10)
self.edge_attr.update(color='blue', fontsize=8,
fontname="Calibri", rotate=True)
self.draw('img/Graph.svg')
self.draw('img/Graph.png')
img = imread('img/Graph.png')
plt.axis("off")
plt.imshow(img)
def __convert_to_adjagecy__(self):
adjagecy_vector = {i: [] for i in self.state}
for i in range(len(self.P)):
for j in range(len(self.P)):
if self.P[i][j] != 0:
adjagecy_vector[self.state[i]].append(self.state[j])
return adjagecy_vector
def is_connected(self, source, target):
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
queue = []
queue.append(source)
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
for s in vector[current_state]:
if target == s:
return True
if visit_status[s] == False:
queue.append(s)
return False
# This part is unused -> comment for later use
# ------------------------------------------
# def has_selfloop(self):
# for i in range(len(self.P)):
# if self.P[i][i] != 0:
# return True
# return False
# def rank_test(self):
# P = np.subtract(self.P, np.identity(len(self.P)))
# if np.linalg.matrix_rank(P) == len(self.P):
# return True
# return False
# -------------------------------------------
def is_regular(self):
# Check is irreducible
component = self.get_connected_component()
if len(component) > 1:
return False
tmp = self.get_period(self.state[0])
if tmp == 1:
return True
return False
# ----------------------------------------------------------
# Get period of a state
# ----------------------------------------------------------
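    # The period of a state is the gcd of the lengths of the cycles through its
    # communicating class; the chain is regular when it is irreducible and that
    # gcd equals 1, which is exactly what is_regular() checks above.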
def __cycle_length__(self, source):
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
step = 0
queue = [source]
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
step += 1
for s in vector[current_state]:
if s == source:
return step
if visit_status[s] == False:
queue.append(s)
return step
def get_connected_component(self):
connected_component = [[]]
status = {i: False for i in self.state}
while True:
counter = 0
for i in self.state:
for j in self.state:
if (self.is_connected(i, j) and self.is_connected(j, i)):
if status[i] == False:
connected_component[counter].append(i)
status[i] = True
if status[j] == False:
connected_component[counter].append(j)
status[j] = True
connected_component.append([])
counter += 1
if i == self.state[len(self.state) - 1] and j == self.state[len(self.state) - 1]:
break
connected_component = list(filter(None, connected_component))
return connected_component
def get_period(self, target):
component = self.get_connected_component()
for sl in component:
if target in sl:
break
t = []
if target not in sl:
return 0
else:
for i in sl:
t.append(self.__cycle_length__(i))
return gcd(t)
# ----------------------------------------------------
# Get steady state
# ----------------------------------------------------
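    # The stationary distribution x satisfies x P = x together with sum(x) = 1.
    # Below this is written as an overdetermined system A x = b with
    # A = [P^T - I; 1 ... 1] and b = [0, ..., 0, 1], solved through the normal
    # equations x = (A^T A)^{-1} A^T b (a least-squares solution).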
def get_steady_state(self):
A = np.transpose(self.P)
A = np.subtract(A, np.identity(len(A)))
A = np.ndarray.tolist(A)
A.append(np.ndarray.tolist(np.ones(len(A))))
b = np.ndarray.tolist(np.transpose(np.zeros(len(A))))
b[len(b)-1] = 1
# Calc
return np.matmul(np.linalg.inv(np.matmul(np.transpose(A), A)), np.matmul(np.transpose(A), b))
# ----------------------------------------------------
# Get mean time spent
# ----------------------------------------------------
def __get_index__(self, state_set):
idx_list = []
tmp = list(self.state)
try:
for state in state_set:
idx_list.append(tmp.index(state))
del tmp
return idx_list
except:
return "State is not in the state set"
def __get_absoring_state__(self):
abr_state = []
for i in range((len(self.state))):
if self.P[i][i] == 1:
abr_state.append(self.state[i])
return abr_state
def __get_mean_state_list__(self, state_set):
tmp = list(self.state)
        tmp = [state for state in tmp if state not in state_set]
return tmp
def __get_mean_time_absoring__(self):
try:
            absorbing_states = self.__get_absoring_state__()
            idx_list = self.__get_index__(absorbing_states)
            state_list = self.__get_mean_state_list__(absorbing_states)
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
b = np.transpose(np.ones(len(P)))
x = np.round(np.linalg.solve(A, b), 2)
del idx_list, P, I, A, b
mean_time = {"Mean time spent " +
state: x_val for (state, x_val) in zip(state_list, x)}
return mean_time
except:
return "Check your state or matrix"
def __get_mean_time_transient__(self, source=None, target=None):
idx_list = self.__get_index__(self.__get_absoring_state__())
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
A = A.tolist()
        if source is None or target is None:
return A
| 32.287975 | 101 | 0.503283 | 9,750 | 0.944493 | 0 | 0 | 0 | 0 | 0 | 0 | 1,938 | 0.187736 |
6f319a2e3b23a21c6ff1ef69178d3b4bc2931b78 | 3,322 | py | Python | src/check_results.py | jagwar/Sentiment-Analysis | 312186c066c360ed4b3ebc9e999dba419f10e93c | [
"MIT"
]
| null | null | null | src/check_results.py | jagwar/Sentiment-Analysis | 312186c066c360ed4b3ebc9e999dba419f10e93c | [
"MIT"
]
| null | null | null | src/check_results.py | jagwar/Sentiment-Analysis | 312186c066c360ed4b3ebc9e999dba419f10e93c | [
"MIT"
]
| null | null | null | import os
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset, SequentialSampler
from transformers import CamembertTokenizer, CamembertForSequenceClassification
import pandas as pd
from tqdm import tqdm, trange
# tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/sentiment-eai/data/36e8f471-821d-4270-be56-febb1be36c26')
# model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/sentiment-eai/data/36e8f471-821d-4270-be56-febb1be36c26')
# tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/sentiment-eai/7a37b1e5-8e7b-45d1-9e87-7314e8e66c0c/')
# model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/sentiment-eai/7a37b1e5-8e7b-45d1-9e87-7314e8e66c0c/')
tokenizer = CamembertTokenizer.from_pretrained('/home/crannou/workspace/serving-preset-images/sentiment-analysis-fr/app/model_sources')
model = CamembertForSequenceClassification.from_pretrained('/home/crannou/workspace/serving-preset-images/sentiment-analysis-fr/app/model_sources')
def eval_model():
df = pd.read_csv('/home/crannou/notebooks/review_polarity_bin.csv', sep=';')
preds = []
all_input_ids = []
all_attention_masks = []
df = df.sample(frac=0.1, random_state=42)
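    # evaluate on a fixed 10% sample of the reviews (random_state pinned for reproducibility)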
all_labels = df['polarity'].values
for sentence in df['review_content']:
input_ids, attention_mask = get_features(sentence)
all_input_ids.append(input_ids)
all_attention_masks.append(attention_mask)
t_inputs_ids = torch.tensor(all_input_ids, dtype=torch.long)
t_attention_mask = torch.tensor(all_attention_masks, dtype=torch.long)
t_labels = torch.tensor(all_labels, dtype=torch.long)
dataset = TensorDataset(t_inputs_ids, t_attention_mask, t_labels)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(
dataset, sampler=eval_sampler, batch_size=32
)
model.eval()
preds = None
out_label_ids = None
with torch.no_grad():
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to("cpu") for t in batch)
inputs = {
"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[2]}
outputs = model(**inputs)
_, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
result = {"acc": (preds == out_label_ids).mean()}
print(result)
def get_features(sentence):
    # truncate to the model's maximum sequence length, capped at 128 tokens
    max_length = min(128, tokenizer.max_len)
    input_ids = tokenizer.encode(
        sentence, add_special_tokens=True, max_length=max_length,
    )
    # pad with zeros and mark the real tokens with 1 in the attention mask
    padding_length = max_length - len(input_ids)
    attention_mask = [1] * len(input_ids)
    input_ids = input_ids + ([0] * padding_length)
    attention_mask = attention_mask + ([0] * padding_length)
    return input_ids, attention_mask
if __name__ == '__main__':
eval_model() | 40.024096 | 147 | 0.705298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 875 | 0.263396 |
6f31bdd4727dd7111ae865267e15057fbd15d9fb | 29 | py | Python | Pacotes/ex022.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
]
| null | null | null | Pacotes/ex022.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
]
| null | null | null | Pacotes/ex022.py | TonyRio/Python-Exercicios | 8a72d1b12418c6485794dae184425df0daf098bb | [
"MIT"
]
| null | null | null | print (19 // 2 )
print( 19%2) | 14.5 | 16 | 0.551724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6f32849e7bc2a9a3bdff91b0ea97b373245c40e0 | 934 | py | Python | uscampgrounds/models.py | adamfast/geodjango-uscampgrounds | 0ddcdfee44dd2cb3525bbf852e93a58e5429d0d8 | [
"BSD-3-Clause"
]
| 1 | 2020-06-26T22:32:25.000Z | 2020-06-26T22:32:25.000Z | uscampgrounds/models.py | adamfast/geodjango-uscampgrounds | 0ddcdfee44dd2cb3525bbf852e93a58e5429d0d8 | [
"BSD-3-Clause"
]
| null | null | null | uscampgrounds/models.py | adamfast/geodjango-uscampgrounds | 0ddcdfee44dd2cb3525bbf852e93a58e5429d0d8 | [
"BSD-3-Clause"
]
| null | null | null | from django.conf import settings
from django.contrib.gis.db import models
class Campground(models.Model):
campground_code = models.CharField(max_length=64)
name = models.CharField(max_length=256)
campground_type = models.CharField(max_length=256)
phone = models.CharField(max_length=256)
comments = models.TextField()
sites = models.CharField(max_length=256)
elevation = models.CharField(max_length=256)
hookups = models.CharField(max_length=256)
amenities = models.TextField()
point = models.PointField(srid=4326)
def locator_point(self):
return self.point
def __unicode__(self):
return self.name
# integrate with the django-locator app for easy geo lookups if it's installed
if 'locator.objects' in settings.INSTALLED_APPS:
from locator.objects.models import create_locator_object
models.signals.post_save.connect(create_locator_object, sender=Campground)
| 35.923077 | 78 | 0.755889 | 589 | 0.630621 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.101713 |
6f34182931d744d711a9eaa391580c23eb3546c2 | 383 | py | Python | blog/users/urls.py | simpleOnly1/blog | 34343068318a64bd537e5862181e037fc4636247 | [
"MIT"
]
| null | null | null | blog/users/urls.py | simpleOnly1/blog | 34343068318a64bd537e5862181e037fc4636247 | [
"MIT"
]
| null | null | null | blog/users/urls.py | simpleOnly1/blog | 34343068318a64bd537e5862181e037fc4636247 | [
"MIT"
]
 | null | null | null | # URL routes for the views of the users sub-application
from django.urls import path
from users.views import RegisterView, ImageCodeView, SmsCodeView
urlpatterns = [
    # first argument of path(): the route
    # second argument of path(): the view
    path('register/', RegisterView.as_view(), name='register'),
    # route for the image verification code
    path('imagecode/', ImageCodeView.as_view(), name='imagecode'),
    # SMS sending
    path('smscode/', SmsCodeView.as_view(), name='smscode'),
] | 25.533333 | 64 | 0.715405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.449893 |
6f3478c403c5a4607452ef969c0985f21a247166 | 11,861 | py | Python | src/config/fabric-ansible/ansible-playbooks/filter_plugins/import_lldp_info.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
]
| null | null | null | src/config/fabric-ansible/ansible-playbooks/filter_plugins/import_lldp_info.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
]
| null | null | null | src/config/fabric-ansible/ansible-playbooks/filter_plugins/import_lldp_info.py | EWERK-DIGITAL/tf-controller | 311ea863b03d425a67d04d27c1f1b9cf1e20c926 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
from builtins import object
from builtins import str
import sys
import traceback
sys.path.append("/opt/contrail/fabric_ansible_playbooks/module_utils") # noqa
from filter_utils import _task_done, _task_error_log, _task_log, FilterLog
from job_manager.job_utils import JobVncApi
class FilterModule(object):
def filters(self):
return {
'import_lldp_info': self.import_lldp_info,
}
# end filters
def _instantiate_filter_log_instance(self, device_name):
FilterLog.instance("Import_lldp_info_Filter", device_name)
# end _instantiate_filter_log_instance
def import_lldp_info(self, job_ctx, prouter_fqname,
prouter_vendor,
lldp_neighbors_payload):
"""Topology discovery.
:param job_ctx: Dictionary
# example:
# {
# "auth_token": "EB9ABC546F98",
# "job_input": {
# "fabric_fq_name": [
# "default-global-system-config",
# "fab01"
# ],
# "device_auth": [{
# "username": "root",
# "password": "Embe1mpls"
# }],
# "management_subnets": [
# {
# "cidr": "10.87.69.0/25",
# "gateway": "10.87.69.1"
# }
# ],
# "overlay_ibgp_asn": 64512,
# "node_profiles": [
# {
# "node_profile_name": "juniper-qfx5k"
# }
# ]
# }
# }
:param prouter_fqname: List
example:
# [
# "default-global-system-config",
# "5c3-qfx2"
# ]
:param prouter_vendor: String
example: "juniper"
:param lldp_neighbors_payload: Dictionary
# example:
# {
# "neighbor_info_list":
# [
# {
# "local_physical_interface_name": "xe-0/0/0",
# "remote_device_name": "5b5-qfx11",
# "remote_physical_interface_port_id": "536"
# },
# {
# "local_physical_interface_name": "xe-0/0/2",
# "remote_device_chassis_id": "00:1a:53:46:7b:9e",
# "remote_physical_interface_port_id": "538"
# }
# ]
# }
:return: Dictionary
# if success, returns
# {
# 'status': 'success',
# 'topology_discovery_log':
# <String: topology_discovery_log>,
# 'topology_discovery_resp':
# <Dictionary: topology_discovery_resp>
# }
# if failure, returns
# {
# 'status': 'failure',
# 'error_msg': <String: exception message>,
# 'topology_discovery_log':
# <String: topology_discovery_log>,
# 'topology_discovery_resp':
# <Dictionary: topology_discovery_resp>
# }
:param topology_discovery_resp: Dictionary
# example:
# {
# "lldp_neighbors_success_names":
# <List: <String: lldp_neighbors_success_pair_string>>,
# "lldp_neighbors_failed_info":
# <List: <Dictionary: lldp_neighbor_failed_obj> >
# }
# :param lldp_neighbors_success_names: List
# example:
# ["bng-contrail-qfx51-15 : ge-0/0/36 --> dhawan : ge-2/3/1"]
# :param lldp_neighbors_failed_info: List
# example:
# [
# {
# "lldp_neighbor":
# "bng-contrail-qfx51-15 : em0 --> sw174 : ge-1/0/46",
# "warning_message":
# "Unknown physical interface ng-contrail-qfx51-15:em0"
# }
# ]
"""
self._instantiate_filter_log_instance(prouter_fqname[-1])
_task_log("Starting Topology Discovery")
try:
_task_log("Creating neighboring links")
topology_discovery_resp = self._create_neighbor_links(
job_ctx,
lldp_neighbors_payload,
prouter_fqname,
prouter_vendor)
_task_done()
return {
'status': 'success',
'topology_discovery_log': FilterLog.instance().dump(),
'topology_discovery_resp': topology_discovery_resp
}
except Exception as ex:
_task_error_log(str(ex))
_task_error_log(traceback.format_exc())
return {'status': 'failure',
'error_msg': str(ex),
'topology_discovery_log': FilterLog.instance().dump()}
# end import_lldp_info
def get_vnc_payload(self, vnc_lib, prouter_fqname,
prouter_vendor,
lldp_neighbors_info):
vnc_payload = []
chassis_id_device_name_map = self.get_chassis_id_to_device_name(
vnc_lib, prouter_vendor)
for lldp_neighbor_info in lldp_neighbors_info or []:
local_phy_int = lldp_neighbor_info.get(
'local_physical_interface_name')
phy_int_fqname = []
phy_int_fqname.extend(prouter_fqname)
phy_int_fqname.append(local_phy_int.replace(":", "_"))
remote_device_chassis_id = lldp_neighbor_info.get(
'remote_device_chassis_id')
remote_device_name = chassis_id_device_name_map.get(
remote_device_chassis_id)
if not remote_device_name:
remote_device_name = lldp_neighbor_info.get(
'remote_device_name')
if remote_device_name:
remote_phy_int_fqname_str = \
remote_device_name.replace(
":", "_") + ":" +\
lldp_neighbor_info.get(
'remote_physical_interface_port_id')
vnc_payload.append((phy_int_fqname, remote_phy_int_fqname_str))
return vnc_payload
# end get_vnc_payload
# get chassis mac id to physical router name map
# for all the physical routers in the fabric
def get_chassis_id_to_device_name(self, vnc_lib, prouter_vendor):
chassis_id_to_device_name_map = {}
phy_routers_list = vnc_lib.physical_routers_list(
fields=['device_chassis_refs']).get('physical-routers')
for phy_router in phy_routers_list or []:
if phy_router.get('device_chassis_refs'):
device_chassis_id_info = phy_router.get(
'device_chassis_refs')
for chassis_id_info in device_chassis_id_info or []:
chassis_mac = chassis_id_info['to'][-1].split(
prouter_vendor + '_')[1].replace('_', ':')
chassis_id_to_device_name_map[chassis_mac] = \
phy_router['fq_name'][-1]
return chassis_id_to_device_name_map
# end get_chassis_id_to_device_name
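    # A small worked example of the parsing above (values are illustrative, taken from the
    # docstring of import_lldp_info): a device_chassis ref whose fq_name ends in
    # "juniper_00_1a_53_46_7b_9e" is split on "juniper_" and the underscores restored to
    # colons, yielding the chassis id "00:1a:53:46:7b:9e".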
# group vnc functions
def _create_neighbor_links(self, job_ctx,
lldp_neighbors_payload,
prouter_fqname,
prouter_vendor):
if not lldp_neighbors_payload.get('neighbor_info_list'):
_task_log("No neighbors found")
_task_done()
return {
'lldp_neighbors_success_names': [],
'lldp_neighbors_failed_info': []
}
vnc_lib = JobVncApi.vnc_init(job_ctx)
vnc_topology_disc_payload = self.get_vnc_payload(
vnc_lib,
prouter_fqname,
prouter_vendor,
lldp_neighbors_payload['neighbor_info_list'])
topology_disc_payload = self._do_further_parsing(
vnc_lib, vnc_topology_disc_payload)
_task_done("Parsed payload completely")
_task_log("Creating links between neighboring physical interfaces")
topology_discovery_resp = self._create_physical_interface_refs(
vnc_lib, topology_disc_payload)
return topology_discovery_resp
# end _create_neighbor_links
def _do_further_parsing(self, vnc_lib, neighbor_info_list):
topology_disc_payload = []
for neighbor_info in neighbor_info_list or []:
remote_neighbor_info = neighbor_info[1].split(":", 1)
list_resp = vnc_lib.physical_interfaces_list(
parent_fq_name=["default-global-system-config",
remote_neighbor_info[0]],
filters={"physical_interface_port_id":
remote_neighbor_info[1]}
)
if list_resp['physical-interfaces']:
topology_disc_payload.append([neighbor_info[0],
list_resp['physical-interfaces']
[0]['fq_name']])
return topology_disc_payload
# end _do_further_parsing
def _create_physical_interface_refs(self, vnc_lib, topology_disc_payload):
# create or update refs between physical interfaces
# on the local device to the remote device
object_type = "physical_interface"
lldp_neighbors_success_names = []
lldp_neighbors_failed_info = []
for topology_disc_info in topology_disc_payload or []:
try:
object_fqname = topology_disc_info[0]
ref_fqname = topology_disc_info[1]
pi_obj = vnc_lib.physical_interface_read(fq_name=object_fqname)
# Check ref already present or not
refs = pi_obj.get_physical_interface_refs()
is_link_found = False
if refs:
for ref in refs:
if ref['to'] == ref_fqname:
is_link_found = True
if not is_link_found:
ref_uuid = vnc_lib.fq_name_to_id(object_type, ref_fqname)
pi_obj.set_physical_interface_list([{"to": ref_fqname,
"uuid": ref_uuid}])
vnc_lib.physical_interface_update(pi_obj)
lldp_neighbors_success_names.append(object_fqname[-2] + " : " +
object_fqname[-1] +
" --> " +
ref_fqname[-2] + " : " +
ref_fqname[-1])
except Exception as ex:
_task_error_log(str(ex))
_task_error_log(traceback.format_exc())
lldp_neighbor_failed_obj = {
"lldp_neighbor": object_fqname[-2] + " : " +
object_fqname[-1] + " --> " +
ref_fqname[-2] + " : " +
ref_fqname[-1],
"warning_message": str(ex)
}
lldp_neighbors_failed_info.append(lldp_neighbor_failed_obj)
return {
'lldp_neighbors_success_names': lldp_neighbors_success_names,
'lldp_neighbors_failed_info': lldp_neighbors_failed_info
}
# end _create_physical_interface_refs
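# Hedged usage sketch (not part of the plugin): this filter is normally applied from an
# Ansible playbook, but a direct call would look roughly like the following, assuming
# `job_ctx` and `lldp_neighbors_payload` dictionaries shaped as documented in
# import_lldp_info() above:
#
#   fm = FilterModule()
#   result = fm.filters()['import_lldp_info'](
#       job_ctx,
#       ["default-global-system-config", "5c3-qfx2"],
#       "juniper",
#       lldp_neighbors_payload)
#   assert result['status'] in ('success', 'failure')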
| 39.802013 | 79 | 0.517073 | 11,558 | 0.974454 | 0 | 0 | 0 | 0 | 0 | 0 | 4,992 | 0.420875 |
6f355a92c02e0c6216729df9bbfec7b8bd8e4145 | 527 | py | Python | src/shared/_menu.py | MarcSkovMadsen/awesome-panel-starter | b76854882a041c7b955a59785d08e167ffef07af | [
"Apache-2.0"
]
| 5 | 2021-01-04T16:39:09.000Z | 2021-08-03T15:26:49.000Z | src/shared/_menu.py | MarcSkovMadsen/awesome-panel-starter | b76854882a041c7b955a59785d08e167ffef07af | [
"Apache-2.0"
]
| 6 | 2020-12-28T03:28:25.000Z | 2021-09-11T13:07:51.000Z | src/shared/_menu.py | MarcSkovMadsen/awesome-panel-starter | b76854882a041c7b955a59785d08e167ffef07af | [
"Apache-2.0"
]
| 1 | 2021-09-15T20:08:44.000Z | 2021-09-15T20:08:44.000Z | """Provides the MENU html string which is appended to all templates
Please note that the MENU only works in [Fast](https://www.fast.design/) based templates.
If you need some sort of custom MENU html string feel free to customize this code.
"""
from awesome_panel_extensions.frameworks.fast.fast_menu import to_menu
from src.shared import config
if config.applications:
MENU = to_menu(
config.applications.values(), accent_color=config.color_primary, expand=["Main"]
).replace("\n", "")
else:
MENU = ""
| 31 | 89 | 0.73814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.493359 |
6f35c3c4af214e988cae123b40970464d22b95ab | 1,909 | py | Python | prediction-api/app.py | BrokenImage/raptor-api | 2cafc7fedf883a730d22dc0e2898f531d20fedf2 | [
"MIT",
"Unlicense"
]
| null | null | null | prediction-api/app.py | BrokenImage/raptor-api | 2cafc7fedf883a730d22dc0e2898f531d20fedf2 | [
"MIT",
"Unlicense"
]
| null | null | null | prediction-api/app.py | BrokenImage/raptor-api | 2cafc7fedf883a730d22dc0e2898f531d20fedf2 | [
"MIT",
"Unlicense"
]
| null | null | null | import os
import boto3
import numpy as np
import tensorflow as tf
from flask import Flask
from dotenv import load_dotenv
from pymongo import MongoClient
from keras.models import load_model
from sklearn.preprocessing import LabelEncoder
from werkzeug.datastructures import FileStorage
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_restplus import Api, Resource
from utils.Model import ModelManager
load_dotenv()
# Mongodb connection
client = MongoClient(os.environ['MONGO_CLIENT_URL'])
db = client.registry
# AWS S3 connection
session = boto3.Session(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY']
)
s3 = session.resource('s3')
# App and API setup
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
api = Api(app, version="1.0", title="Anomaly Detection", description="")
ns = api.namespace('api')
single_parser = api.parser()
single_parser.add_argument("files", location="files", type=FileStorage, action='append', required=True)
graph = tf.get_default_graph()
backup_model = load_model("./models/backup/model.h5")
backup_label_encoder = LabelEncoder()
backup_label_encoder.classes_ = np.load("./models/backup/classes.npy")
@ns.route("/classify")
class MultiClassification(Resource):
@api.doc(parser=single_parser, description='Upload an image of a solar panel')
def post(self):
model = ModelManager(db, s3, graph, backup_model, backup_label_encoder,
bucket_name=os.environ['AWS_BUCKET_NAME'])
model.load_latest_model()
args = single_parser.parse_args()
image_files = args.files
preds = []
for image in image_files:
image_array = model.preprocess(image)
preds.append(model.predict(image_array)[0])
return {'prediction': str(preds)}
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
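# Hedged usage sketch (host/port assumed from the app.run defaults above; file name is
# illustrative): the classify endpoint accepts one or more multipart "files" parts, e.g.
#
#   curl -X POST -F "files=@panel.jpg" http://localhost:5000/api/classify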
| 30.790323 | 103 | 0.736511 | 595 | 0.311682 | 0 | 0 | 618 | 0.32373 | 0 | 0 | 316 | 0.165532 |
6f35ce7e4cec8e809fb6bd6d1db0395eade06403 | 633 | py | Python | misc/fill_blanks.py | netotz/codecamp | ff6b5ce1af1d99bbb00f7e095ca6beac92020b1c | [
"Unlicense"
]
| null | null | null | misc/fill_blanks.py | netotz/codecamp | ff6b5ce1af1d99bbb00f7e095ca6beac92020b1c | [
"Unlicense"
]
| null | null | null | misc/fill_blanks.py | netotz/codecamp | ff6b5ce1af1d99bbb00f7e095ca6beac92020b1c | [
"Unlicense"
]
| 1 | 2020-04-05T06:22:18.000Z | 2020-04-05T06:22:18.000Z | # Given an array containing None values fill in the None values with most recent
# non-None value in the array
from random import random
def generate_sample(n):
rand = 0.9
while n:
yield int(rand * 10) if rand % 1 > 1 / 3 else None
rand = random()
n -= 1
def fill1(array):
for i in range(len(array)):
if array[i] is None:
array[i] = array[i - 1]
return array
def fill2(array):
for i, num in enumerate(array):
if num is None:
array[i] = array[i - 1]
return array
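# Worked example of the expected behaviour (based on the problem statement above):
#   fill1([3, None, None, 5, None]) -> [3, 3, 3, 5, 5]
#   fill2([3, None, None, 5, None]) -> [3, 3, 3, 5, 5]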
# Exercise the fillers on a generated sample so the array actually contains None values;
# pass copies because fill1 and fill2 mutate their argument in place.
test = list(generate_sample(10))
print(test)
print(fill1(list(test)))
print(fill2(list(test)))
| 22.607143 | 81 | 0.593997 | 0 | 0 | 149 | 0.235387 | 0 | 0 | 0 | 0 | 110 | 0.173776 |
6f37404f1493e37478a90fbc8c755991983fccf9 | 3,836 | py | Python | beast/tests/helpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
]
| null | null | null | beast/tests/helpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
]
| null | null | null | beast/tests/helpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
]
| null | null | null | # useful functions for BEAST tests
# put here instead of having in every tests
import os.path
import numpy as np
import h5py
from astropy.io import fits
from astropy.utils.data import download_file
__all__ = ['download_rename', 'compare_tables', 'compare_fits',
'compare_hdf5']
def download_rename(filename):
"""Download a file and rename it to have the right extension.
Otherwise, downloaded file will not have an extension at all and an
extension is needed for the BEAST.
Parameters
----------
filename : str
name of file to download
"""
url_loc = 'http://www.stsci.edu/~kgordon/beast/'
fname_dld = download_file('%s%s' % (url_loc, filename))
extension = filename.split('.')[-1]
fname = '%s.%s' % (fname_dld, extension)
os.rename(fname_dld, fname)
return fname
def compare_tables(table_cache, table_new):
"""
Compare two tables using astropy tables routines.
Parameters
----------
table_cache : astropy table
table_new : astropy table
        data for comparison.
"""
assert len(table_new) == len(table_cache)
for tcolname in table_new.colnames:
# test numerical types for closeness
# and other types for equality
if table_new[tcolname].data.dtype.kind in ['f', 'i']:
np.testing.assert_allclose(table_new[tcolname],
table_cache[tcolname],
err_msg=('%s columns not equal'
% tcolname))
else:
np.testing.assert_equal(table_new[tcolname],
table_cache[tcolname],
err_msg=('%s columns not equal'
% tcolname))
def compare_fits(fname_cache, fname_new):
"""
Compare two FITS files.
Parameters
----------
fname_cache : str
    fname_new : str
        names of the FITS files
"""
fits_cache = fits.open(fname_cache)
fits_new = fits.open(fname_new)
assert len(fits_new) == len(fits_cache)
for k in range(1, len(fits_new)):
qname = fits_new[k].header['EXTNAME']
np.testing.assert_allclose(fits_new[k].data,
fits_cache[qname].data,
err_msg=('%s FITS extension not equal'
% qname))
def compare_hdf5(fname_cache, fname_new, ctype=None):
"""
Compare two hdf files.
Parameters
----------
fname_cache : str
    fname_new : str
        names of the hdf5 files
ctype : str
if set, string to identify the type of data being tested
"""
hdf_cache = h5py.File(fname_cache, 'r')
hdf_new = h5py.File(fname_new, 'r')
# go through the file and check if it is exactly the same
for sname in hdf_cache.keys():
if isinstance(hdf_cache[sname], h5py.Dataset):
cvalue = hdf_cache[sname]
cvalue_new = hdf_new[sname]
if ctype is not None:
osname = '%s/%s' % (ctype, sname)
else:
osname = sname
if cvalue.dtype.fields is None:
np.testing.assert_allclose(cvalue.value, cvalue_new.value,
err_msg='testing %s' % (osname),
rtol=1e-6)
else:
for ckey in cvalue.dtype.fields.keys():
err_msg = 'testing %s/%s' % (osname, ckey)
np.testing.assert_allclose(cvalue.value[ckey],
cvalue_new.value[ckey],
err_msg=err_msg,
rtol=1e-5)
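# Hedged usage sketch (file names are illustrative, not real cache entries):
#
#   cached = download_rename("simulated_obs.fits")      # fetched from the BEAST URL above
#   compare_fits(cached, "my_new_simulated_obs.fits")   # raises if any extension differs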
| 31.966667 | 75 | 0.533889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,257 | 0.327685 |
6f38b06f669b537017b964e2c9d9bddd9b904d47 | 78,772 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py | jalauzon-msft/azure-sdk-for-python | 15967f5c6d3376f2334a382486ba86339786e028 | [
"MIT"
]
| 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
]
| null | null | null | sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
]
| null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._page_blob_operations import build_clear_pages_request, build_copy_incremental_request, build_create_request, build_get_page_ranges_diff_request, build_get_page_ranges_request, build_resize_request, build_update_sequence_number_request, build_upload_pages_from_url_request, build_upload_pages_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PageBlobOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.storage.blob.aio.AzureBlobStorage`'s
:attr:`page_blob` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def create( # pylint: disable=inconsistent-return-statements
self,
content_length: int,
blob_content_length: int,
timeout: Optional[int] = None,
tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None,
metadata: Optional[Dict[str, str]] = None,
blob_sequence_number: Optional[int] = 0,
request_id_parameter: Optional[str] = None,
blob_tags_string: Optional[str] = None,
immutability_policy_expiry: Optional[datetime.datetime] = None,
immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None,
legal_hold: Optional[bool] = None,
blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
cpk_info: Optional[_models.CpkInfo] = None,
cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""The Create operation creates a new page blob.
:param content_length: The length of the request.
:type content_length: long
:param blob_content_length: This header specifies the maximum size for the page blob, up to 1
TB. The page blob size must be aligned to a 512-byte boundary.
:type blob_content_length: long
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param tier: Optional. Indicates the tier to be set on the page blob. Default value is None.
:type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier
:param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
If no name-value pairs are specified, the operation will copy the metadata from the source blob
or file to the destination blob. If one or more name-value pairs are specified, the destination
blob is created with the specified metadata, and metadata is not copied from the source blob or
file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
information. Default value is None.
:type metadata: dict[str, str]
:param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
value that you can use to track requests. The value of the sequence number must be between 0
and 2^63 - 1. Default value is 0.
:type blob_sequence_number: long
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
value is None.
:type blob_tags_string: str
:param immutability_policy_expiry: Specifies the date time when the blobs immutability policy
is set to expire. Default value is None.
:type immutability_policy_expiry: ~datetime.datetime
:param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
Default value is None.
:type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
:param legal_hold: Specified if a legal hold should be set on the blob. Default value is None.
:type legal_hold: bool
:param blob_http_headers: Parameter group. Default value is None.
:type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Parameter group. Default value is None.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Parameter group. Default value is None.
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append
blob. Default value is "PageBlob". Note that overriding this default value may result in
unsupported behavior.
:paramtype blob_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
blob_type = kwargs.pop('blob_type', _headers.pop('x-ms-blob-type', "PageBlob")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_blob_content_type = None
_blob_content_encoding = None
_blob_content_language = None
_blob_content_md5 = None
_blob_cache_control = None
_lease_id = None
_blob_content_disposition = None
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_encryption_scope = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if blob_http_headers is not None:
_blob_content_type = blob_http_headers.blob_content_type
_blob_content_encoding = blob_http_headers.blob_content_encoding
_blob_content_language = blob_http_headers.blob_content_language
_blob_content_md5 = blob_http_headers.blob_content_md5
_blob_cache_control = blob_http_headers.blob_cache_control
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if blob_http_headers is not None:
_blob_content_disposition = blob_http_headers.blob_content_disposition
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if cpk_scope_info is not None:
_encryption_scope = cpk_scope_info.encryption_scope
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_create_request(
url=self._config.url,
blob_type=blob_type,
version=self._config.version,
content_length=content_length,
blob_content_length=blob_content_length,
timeout=timeout,
tier=tier,
blob_content_type=_blob_content_type,
blob_content_encoding=_blob_content_encoding,
blob_content_language=_blob_content_language,
blob_content_md5=_blob_content_md5,
blob_cache_control=_blob_cache_control,
metadata=metadata,
lease_id=_lease_id,
blob_content_disposition=_blob_content_disposition,
encryption_key=_encryption_key,
encryption_key_sha256=_encryption_key_sha256,
encryption_algorithm=_encryption_algorithm,
encryption_scope=_encryption_scope,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
blob_sequence_number=blob_sequence_number,
request_id_parameter=request_id_parameter,
blob_tags_string=blob_tags_string,
immutability_policy_expiry=immutability_policy_expiry,
immutability_policy_mode=immutability_policy_mode,
legal_hold=legal_hold,
template_url=self.create.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
if cls:
return cls(pipeline_response, None, response_headers)
create.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
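    # Hedged usage sketch (assumes an already constructed AzureBlobStorage async client named
    # `client`; sizes are illustrative): create an empty 1 MiB page blob.
    #
    #   await client.page_blob.create(content_length=0, blob_content_length=1024 * 1024)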
@distributed_trace_async
async def upload_pages( # pylint: disable=inconsistent-return-statements
self,
content_length: int,
body: IO,
transactional_content_md5: Optional[bytearray] = None,
transactional_content_crc64: Optional[bytearray] = None,
timeout: Optional[int] = None,
range: Optional[str] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
cpk_info: Optional[_models.CpkInfo] = None,
cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""The Upload Pages operation writes a range of pages to a page blob.
:param content_length: The length of the request.
:type content_length: long
:param body: Initial data.
:type body: IO
:param transactional_content_md5: Specify the transactional md5 for the body, to be validated
by the service. Default value is None.
:type transactional_content_md5: bytearray
:param transactional_content_crc64: Specify the transactional crc64 for the body, to be
validated by the service. Default value is None.
:type transactional_content_crc64: bytearray
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param range: Return only the bytes of the blob in the specified range. Default value is None.
:type range: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Parameter group. Default value is None.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Parameter group. Default value is None.
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param sequence_number_access_conditions: Parameter group. Default value is None.
:type sequence_number_access_conditions:
~azure.storage.blob.models.SequenceNumberAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "page". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword page_write: Required. You may specify one of the following options:
* Update: Writes the bytes specified by the request body into the specified range. The Range
and Content-Length headers must match to perform the update.
* Clear: Clears the specified range and releases the space used in storage for that range. To
clear a range, set the Content-Length header to zero, and the Range header to a value that
indicates the range to clear, up to maximum blob size. Default value is "update". Note that
overriding this default value may result in unsupported behavior.
:paramtype page_write: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str
page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "update")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/octet-stream")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[None]
_lease_id = None
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_encryption_scope = None
_if_sequence_number_less_than_or_equal_to = None
_if_sequence_number_less_than = None
_if_sequence_number_equal_to = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if cpk_scope_info is not None:
_encryption_scope = cpk_scope_info.encryption_scope
if sequence_number_access_conditions is not None:
_if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
_if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
_if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
_content = body
request = build_upload_pages_request(
url=self._config.url,
comp=comp,
page_write=page_write,
version=self._config.version,
content_type=content_type,
content=_content,
content_length=content_length,
transactional_content_md5=transactional_content_md5,
transactional_content_crc64=transactional_content_crc64,
timeout=timeout,
range=range,
lease_id=_lease_id,
encryption_key=_encryption_key,
encryption_key_sha256=_encryption_key_sha256,
encryption_algorithm=_encryption_algorithm,
encryption_scope=_encryption_scope,
if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
if_sequence_number_less_than=_if_sequence_number_less_than,
if_sequence_number_equal_to=_if_sequence_number_equal_to,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
template_url=self.upload_pages.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
if cls:
return cls(pipeline_response, None, response_headers)
upload_pages.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
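    # Hedged usage sketch (illustrative values; `data` is a bytes stream whose length matches
    # the 512-aligned range below, and `client` is an AzureBlobStorage async client):
    #
    #   await client.page_blob.upload_pages(
    #       content_length=4096, body=data, range="bytes=0-4095")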
@distributed_trace_async
async def clear_pages( # pylint: disable=inconsistent-return-statements
self,
content_length: int,
timeout: Optional[int] = None,
range: Optional[str] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
cpk_info: Optional[_models.CpkInfo] = None,
cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""The Clear Pages operation clears a set of pages from a page blob.
:param content_length: The length of the request.
:type content_length: long
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param range: Return only the bytes of the blob in the specified range. Default value is None.
:type range: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Parameter group. Default value is None.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Parameter group. Default value is None.
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param sequence_number_access_conditions: Parameter group. Default value is None.
:type sequence_number_access_conditions:
~azure.storage.blob.models.SequenceNumberAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "page". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword page_write: Required. You may specify one of the following options:
* Update: Writes the bytes specified by the request body into the specified range. The Range
and Content-Length headers must match to perform the update.
* Clear: Clears the specified range and releases the space used in storage for that range. To
clear a range, set the Content-Length header to zero, and the Range header to a value that
indicates the range to clear, up to maximum blob size. Default value is "clear". Note that
overriding this default value may result in unsupported behavior.
:paramtype page_write: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str
page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "clear")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_lease_id = None
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_encryption_scope = None
_if_sequence_number_less_than_or_equal_to = None
_if_sequence_number_less_than = None
_if_sequence_number_equal_to = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if cpk_scope_info is not None:
_encryption_scope = cpk_scope_info.encryption_scope
if sequence_number_access_conditions is not None:
_if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
_if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
_if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_clear_pages_request(
url=self._config.url,
comp=comp,
page_write=page_write,
version=self._config.version,
content_length=content_length,
timeout=timeout,
range=range,
lease_id=_lease_id,
encryption_key=_encryption_key,
encryption_key_sha256=_encryption_key_sha256,
encryption_algorithm=_encryption_algorithm,
encryption_scope=_encryption_scope,
if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
if_sequence_number_less_than=_if_sequence_number_less_than,
if_sequence_number_equal_to=_if_sequence_number_equal_to,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
template_url=self.clear_pages.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
clear_pages.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
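    # Hedged usage sketch (illustrative range): clear the first 4 KiB of pages. Per the
    # docstring above, a clear operation sends no body, so content_length is zero.
    #
    #   await client.page_blob.clear_pages(content_length=0, range="bytes=0-4095")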
@distributed_trace_async
async def upload_pages_from_url( # pylint: disable=inconsistent-return-statements
self,
source_url: str,
source_range: str,
content_length: int,
range: str,
source_content_md5: Optional[bytearray] = None,
source_contentcrc64: Optional[bytearray] = None,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
copy_source_authorization: Optional[str] = None,
cpk_info: Optional[_models.CpkInfo] = None,
cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""The Upload Pages operation writes a range of pages to a page blob where the contents are read
from a URL.
:param source_url: Specify a URL to the copy source.
:type source_url: str
:param source_range: Bytes of source data in the specified range. The length of this range
should match the ContentLength header and x-ms-range/Range destination range header.
:type source_range: str
:param content_length: The length of the request.
:type content_length: long
:param range: The range of bytes to which the source range would be written. The range should
be 512 aligned and range-end is required.
:type range: str
:param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
from the copy source. Default value is None.
:type source_content_md5: bytearray
:param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
read from the copy source. Default value is None.
:type source_contentcrc64: bytearray
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
OAuth access token to copy source. Default value is None.
:type copy_source_authorization: str
:param cpk_info: Parameter group. Default value is None.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Parameter group. Default value is None.
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param sequence_number_access_conditions: Parameter group. Default value is None.
:type sequence_number_access_conditions:
~azure.storage.blob.models.SequenceNumberAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:param source_modified_access_conditions: Parameter group. Default value is None.
:type source_modified_access_conditions:
~azure.storage.blob.models.SourceModifiedAccessConditions
:keyword comp: comp. Default value is "page". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword page_write: Required. You may specify one of the following options:
* Update: Writes the bytes specified by the request body into the specified range. The Range
and Content-Length headers must match to perform the update.
* Clear: Clears the specified range and releases the space used in storage for that range. To
clear a range, set the Content-Length header to zero, and the Range header to a value that
indicates the range to clear, up to maximum blob size. Default value is "update". Note that
overriding this default value may result in unsupported behavior.
:paramtype page_write: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "page")) # type: str
page_write = kwargs.pop('page_write', _headers.pop('x-ms-page-write', "update")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_encryption_scope = None
_lease_id = None
_if_sequence_number_less_than_or_equal_to = None
_if_sequence_number_less_than = None
_if_sequence_number_equal_to = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
_source_if_modified_since = None
_source_if_unmodified_since = None
_source_if_match = None
_source_if_none_match = None
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if cpk_scope_info is not None:
_encryption_scope = cpk_scope_info.encryption_scope
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if sequence_number_access_conditions is not None:
_if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
_if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
_if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
if source_modified_access_conditions is not None:
_source_if_modified_since = source_modified_access_conditions.source_if_modified_since
_source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
_source_if_match = source_modified_access_conditions.source_if_match
_source_if_none_match = source_modified_access_conditions.source_if_none_match
request = build_upload_pages_from_url_request(
url=self._config.url,
comp=comp,
page_write=page_write,
version=self._config.version,
source_url=source_url,
source_range=source_range,
content_length=content_length,
range=range,
source_content_md5=source_content_md5,
source_contentcrc64=source_contentcrc64,
timeout=timeout,
encryption_key=_encryption_key,
encryption_key_sha256=_encryption_key_sha256,
encryption_algorithm=_encryption_algorithm,
encryption_scope=_encryption_scope,
lease_id=_lease_id,
if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
if_sequence_number_less_than=_if_sequence_number_less_than,
if_sequence_number_equal_to=_if_sequence_number_equal_to,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
source_if_modified_since=_source_if_modified_since,
source_if_unmodified_since=_source_if_unmodified_since,
source_if_match=_source_if_match,
source_if_none_match=_source_if_none_match,
request_id_parameter=request_id_parameter,
copy_source_authorization=copy_source_authorization,
template_url=self.upload_pages_from_url.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
if cls:
return cls(pipeline_response, None, response_headers)
upload_pages_from_url.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
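    # Hedged usage sketch (URL, SAS token and ranges are placeholders; content_length is zero
    # on the assumption that the page data is read from the source URL rather than the
    # request body):
    #
    #   await client.page_blob.upload_pages_from_url(
    #       source_url="https://account.blob.core.windows.net/src/blob?<sas>",
    #       source_range="bytes=0-4095",
    #       content_length=0,
    #       range="bytes=0-4095")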
@distributed_trace_async
async def get_page_ranges(
self,
snapshot: Optional[str] = None,
timeout: Optional[int] = None,
range: Optional[str] = None,
request_id_parameter: Optional[str] = None,
marker: Optional[str] = None,
maxresults: Optional[int] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> _models.PageList:
"""The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
of a page blob.
:param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
see :code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
a Snapshot of a Blob.</a>`. Default value is None.
:type snapshot: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param range: Return only the bytes of the blob in the specified range. Default value is None.
:type range: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param marker: A string value that identifies the portion of the list of containers to be
returned with the next listing operation. The operation returns the NextMarker value within the
response body if the listing operation did not return all containers remaining to be listed
with the current page. The NextMarker value can be used as the value for the marker parameter
in a subsequent call to request the next page of list items. The marker value is opaque to the
client. Default value is None.
:type marker: str
:param maxresults: Specifies the maximum number of containers to return. If the request does
not specify maxresults, or specifies a value greater than 5000, the server will return up to
5000 items. Note that if the listing operation crosses a partition boundary, then the service
will return a continuation token for retrieving the remainder of the results. For this reason,
it is possible that the service will return fewer results than specified by maxresults, or than
the default of 5000. Default value is None.
:type maxresults: int
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "pagelist". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PageList, or the result of cls(response)
:rtype: ~azure.storage.blob.models.PageList
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "pagelist")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.PageList]
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_get_page_ranges_request(
url=self._config.url,
comp=comp,
version=self._config.version,
snapshot=snapshot,
timeout=timeout,
range=range,
lease_id=_lease_id,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
marker=marker,
maxresults=maxresults,
template_url=self.get_page_ranges.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('PageList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_page_ranges.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
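    # Illustrative usage sketch (an assumption, not part of the generated client): the
    # *_access_conditions arguments are parameter-group model objects, so a caller that
    # owns these page-blob operations (e.g. an AzureBlobStorage client, here assumed to
    # be named `client`) could do:
    #   lease = _models.LeaseAccessConditions(lease_id="<your-lease-id>")
    #   page_list = await client.page_blob.get_page_ranges(
    #       range="bytes=0-511", lease_access_conditions=lease)
    #   ranges = page_list.page_range or []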
@distributed_trace_async
async def get_page_ranges_diff(
self,
snapshot: Optional[str] = None,
timeout: Optional[int] = None,
prevsnapshot: Optional[str] = None,
prev_snapshot_url: Optional[str] = None,
range: Optional[str] = None,
request_id_parameter: Optional[str] = None,
marker: Optional[str] = None,
maxresults: Optional[int] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> _models.PageList:
"""The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
were changed between target blob and previous snapshot.
:param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
see :code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
a Snapshot of a Blob.</a>`. Default value is None.
:type snapshot: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a
DateTime value that specifies that the response will contain only pages that were changed
between target blob and previous snapshot. Changed pages include both updated and cleared
pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is
the older of the two. Note that incremental snapshots are currently supported only for blobs
created on or after January 1, 2016. Default value is None.
:type prevsnapshot: str
:param prev_snapshot_url: Optional. This header is only supported in service versions
2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The
response will only contain pages that were changed between the target blob and its previous
snapshot. Default value is None.
:type prev_snapshot_url: str
:param range: Return only the bytes of the blob in the specified range. Default value is None.
:type range: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param marker: A string value that identifies the portion of the list of page ranges to be
returned with the next listing operation. The operation returns the NextMarker value within the
response body if the listing operation did not return all page ranges remaining to be listed
with the current page. The NextMarker value can be used as the value for the marker parameter
in a subsequent call to request the next page of list items. The marker value is opaque to the
client. Default value is None.
:type marker: str
:param maxresults: Specifies the maximum number of page ranges to return. If the request does
not specify maxresults, or specifies a value greater than 5000, the server will return up to
5000 items. Note that if the listing operation crosses a partition boundary, then the service
will return a continuation token for retrieving the remainder of the results. For this reason,
it is possible that the service will return fewer results than specified by maxresults, or than
the default of 5000. Default value is None.
:type maxresults: int
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "pagelist". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PageList, or the result of cls(response)
:rtype: ~azure.storage.blob.models.PageList
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "pagelist")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.PageList]
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_get_page_ranges_diff_request(
url=self._config.url,
comp=comp,
version=self._config.version,
snapshot=snapshot,
timeout=timeout,
prevsnapshot=prevsnapshot,
prev_snapshot_url=prev_snapshot_url,
range=range,
lease_id=_lease_id,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
marker=marker,
maxresults=maxresults,
template_url=self.get_page_ranges_diff.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('PageList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_page_ranges_diff.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
@distributed_trace_async
async def resize( # pylint: disable=inconsistent-return-statements
self,
blob_content_length: int,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
cpk_info: Optional[_models.CpkInfo] = None,
cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""Resize the Blob.
:param blob_content_length: This header specifies the maximum size for the page blob, up to 1
TB. The page blob size must be aligned to a 512-byte boundary.
:type blob_content_length: long
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Parameter group. Default value is None.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Parameter group. Default value is None.
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "properties". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "properties")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_lease_id = None
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_encryption_scope = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if cpk_scope_info is not None:
_encryption_scope = cpk_scope_info.encryption_scope
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_resize_request(
url=self._config.url,
comp=comp,
version=self._config.version,
blob_content_length=blob_content_length,
timeout=timeout,
lease_id=_lease_id,
encryption_key=_encryption_key,
encryption_key_sha256=_encryption_key_sha256,
encryption_algorithm=_encryption_algorithm,
encryption_scope=_encryption_scope,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
template_url=self.resize.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
resize.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
@distributed_trace_async
async def update_sequence_number( # pylint: disable=inconsistent-return-statements
self,
sequence_number_action: Union[str, "_models.SequenceNumberActionType"],
timeout: Optional[int] = None,
blob_sequence_number: Optional[int] = 0,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""Update the sequence number of the blob.
:param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the
request. This property applies to page blobs only. This property indicates how the service
should modify the blob's sequence number.
:type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
value that you can use to track requests. The value of the sequence number must be between 0
and 2^63 - 1. Default value is 0.
:type blob_sequence_number: long
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "properties". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "properties")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_update_sequence_number_request(
url=self._config.url,
comp=comp,
version=self._config.version,
sequence_number_action=sequence_number_action,
timeout=timeout,
lease_id=_lease_id,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
blob_sequence_number=blob_sequence_number,
request_id_parameter=request_id_parameter,
template_url=self.update_sequence_number.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
update_sequence_number.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
@distributed_trace_async
async def copy_incremental( # pylint: disable=inconsistent-return-statements
self,
copy_source: str,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
**kwargs: Any
) -> None:
"""The Copy Incremental operation copies a snapshot of the source page blob to a destination page
blob. The snapshot is copied such that only the differential changes since the previously
copied snapshot are transferred to the destination. The copied snapshots are complete copies of
the original snapshot and can be read or copied from as usual. This API is supported since REST
version 2016-05-31.
:param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of
up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it
would appear in a request URI. The source blob must either be public or must be authenticated
via a shared access signature.
:type copy_source: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`. Default value is None.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:param modified_access_conditions: Parameter group. Default value is None.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword comp: comp. Default value is "incrementalcopy". Note that overriding this default
value may result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
comp = kwargs.pop('comp', _params.pop('comp', "incrementalcopy")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
request = build_copy_incremental_request(
url=self._config.url,
comp=comp,
version=self._config.version,
copy_source=copy_source,
timeout=timeout,
if_modified_since=_if_modified_since,
if_unmodified_since=_if_unmodified_since,
if_match=_if_match,
if_none_match=_if_none_match,
if_tags=_if_tags,
request_id_parameter=request_id_parameter,
template_url=self.copy_incremental.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id'))
response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
if cls:
return cls(pipeline_response, None, response_headers)
copy_incremental.metadata = {'url': "{url}/{containerName}/{blob}"} # type: ignore
| 54.551247 | 319 | 0.692441 | 77,261 | 0.980818 | 0 | 0 | 75,690 | 0.960874 | 75,429 | 0.957561 | 34,411 | 0.436843 |
6f38ecb37fdc239d1019da968ae8c9a2467372bc | 4,494 | py | Python | Win_Source/ESP_Autostart.py | maschhoff/ESP32-433Mhz-Receiver-and-Tools | a7cb8c0740054650d38444781d2b7b6c18779a29 | [
"MIT"
]
| 3 | 2020-11-29T18:38:48.000Z | 2022-02-23T15:13:56.000Z | Win_Source/ESP_Autostart.py | maschhoff/ESP32-433Mhz-Receiver-and-Tools | a7cb8c0740054650d38444781d2b7b6c18779a29 | [
"MIT"
]
| null | null | null | Win_Source/ESP_Autostart.py | maschhoff/ESP32-433Mhz-Receiver-and-Tools | a7cb8c0740054650d38444781d2b7b6c18779a29 | [
"MIT"
]
| 2 | 2021-07-25T18:03:12.000Z | 2021-07-26T11:50:14.000Z | # Detlev Aschhoff [email protected]
# The MIT License (MIT)
#
# Copyright (c) 2020
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import *
import serial
root=Tk()
root.title("ESP Autostart Changer")
err=""
def serialOn():
global ser
for port in range(3,9):
comport="COM"+str(port)+":"
try:
ser = serial.Serial(port=comport,baudrate=115200)
serialopen=True
except Exception as e:
#print ("error open serial port: " + str(e))
serialopen=False
if serialopen == True:
#ESPsend(chr(4))
ESPsend(chr(3))
time.sleep(1)
if ser.inWaiting() != 0:
ser.read()
return (comport)
    # No port could be opened: report the error only after trying COM3..COM8
    return ("Error")
def ESPsend(out):
out+="\r\n"
out=out.encode("utf-8")
ser.write(out)
time.sleep(0.1)
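# Note: the board is assumed to run MicroPython, which executes main.py (after boot.py) on
# every reset. The two helpers below therefore toggle autostart simply by renaming main.py
# to mainxxx.py and back over the serial REPL.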
def autooff():
if ser.isOpen() == False:start()
ESPsend("import os")
ESPsend("os.rename('main.py','mainxxx.py')")
time.sleep(0.5)
res=""
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
pos=res.find("OSError")
if pos==-1:
hinweistxt="Autostart is off"
else:
hinweistxt="Autostart already off"
hinweis.config(text=hinweistxt)
stop()
def autoon():
if ser.isOpen() == False:start()
ESPsend("import os")
ESPsend("os.rename('mainxxx.py','main.py')")
res=""
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
pos=res.find("OSError")
if pos==-1:
hinweistxt="Autostart is on"
else:
hinweistxt="Autostart already on"
hinweis.config(text=hinweistxt)
stop()
def stop():
ser.close()
def start():
while True:
res=""
err=serialOn()
if err!="Error":
statustxt="ESP connectet on: "+err
status.config(text=statustxt)
ESPsend("import os")
ESPsend("os.listdir()")
while ser.inWaiting() != 0:
a=ser.read()
res+=a.decode("utf-8")
if "main.py" in res:
hinweistxt="Autostart is on"
else:
hinweistxt="Autostart is off"
hinweis.config(text=hinweistxt)
break
else:
if askyesno("No ESP found!!! Try again?"):
ser.close()
pass
else:
exit()
#----------------------------------------------------------------------------------
#---------- Witgets laden
frameButton = Frame(root)
frameButton.pack(fill='both')
button2=Button(frameButton, text="Autostart ON ", command=autoon)
button2.pack(side="right",padx="5",pady="2")
button1=Button(frameButton, text="Autostart OFF ", command=autooff)
button1.pack(side="right",padx="5")
hinweis = Label(root, fg = "lightgreen",bg = "gray", font = "Verdana 10 bold" )
hinweis.pack(fill='both',padx="5",pady="2")
hinweistxt="Change Autostart "
hinweis.config(text=hinweistxt)
status = Label(root)
status.pack(fill='both',padx="5",pady="2")
statustxt=" "
status.config(text=statustxt)
#------------------------------------------------------------------------------------
start()
root.mainloop()
| 29.372549 | 86 | 0.574099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,962 | 0.436582 |
6f392ad202bb9d010a7064f5991bc4aec4981e22 | 212 | py | Python | app/core/apps.py | KarimTayie/djangoadmin-test | 7218866dbf72ae580e605d2f32601557efe1baca | [
"MIT"
]
| null | null | null | app/core/apps.py | KarimTayie/djangoadmin-test | 7218866dbf72ae580e605d2f32601557efe1baca | [
"MIT"
]
| 1 | 2019-12-18T16:01:44.000Z | 2019-12-18T16:01:44.000Z | app/core/apps.py | KarimTayie/djangoadmin-test | 7218866dbf72ae580e605d2f32601557efe1baca | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
class CoreConfig(AppConfig):
name = "core"
class AppAdminConfig(AdminConfig):
default_site = "core.admin.AppAdmin"
| 17.666667 | 49 | 0.768868 | 121 | 0.570755 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.127358 |
6f3a244598ce08d2133e90a30870681650f7eccc | 1,249 | py | Python | spike/compiler/Node.py | spikeio/lang | 33be3b51eaf22f873ddc94f2f9e381f605a2499f | [
"BSD-2-Clause"
]
| 1 | 2020-06-15T07:38:12.000Z | 2020-06-15T07:38:12.000Z | spike/compiler/Node.py | spikeio/lang | 33be3b51eaf22f873ddc94f2f9e381f605a2499f | [
"BSD-2-Clause"
]
| 1 | 2021-11-12T11:01:45.000Z | 2021-11-12T11:01:45.000Z | spike/compiler/Node.py | spikeio/lang | 33be3b51eaf22f873ddc94f2f9e381f605a2499f | [
"BSD-2-Clause"
]
| null | null | null |
class Node(object):
# XXX: legacy code support
kind = property(lambda self: self.__class__)
def _iterChildren(self):
for name in self.childAttrNames:
yield (name, getattr(self, name))
return
children = property(_iterChildren)
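    # Subclasses are expected to define 'childAttrNames' (a sequence of attribute names);
    # 'children' then yields (name, value) pairs, which dump() and graphviz() below rely on.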
def dump(self, stream, indent=0):
# XXX: Expand argument lists? Show declspecs? (Ditto for
# 'graphviz'.)
for attr, child in self.children:
print >>stream, "%s%s: %r" % (" " * indent, attr, child)
if hasattr(child, 'dump'):
child.dump(stream, indent + 1)
return
def graphviz(self, stream):
print >>stream, ' n%d[label="%r"];' % (id(self), self)
for attr, child in self.children:
if child is None:
pass
elif hasattr(child, 'graphviz'):
child.graphviz(stream)
else:
print >>stream, ' n%d[label="%r"];' % (id(child), child)
print >>stream
for attr, child in self.children:
if child is None:
continue
print >>stream, ' n%d->n%d[label="%s"];' % (
id(self), id(child), attr)
return
| 27.152174 | 75 | 0.5004 | 1,246 | 0.997598 | 126 | 0.100881 | 0 | 0 | 0 | 0 | 201 | 0.160929 |
6f3bc48d07d6db347089edf80b48b6fd74fd6c76 | 2,108 | py | Python | download_cifar100_teacher.py | valeoai/QuEST | 02a23d2d8e0d059b4a30433f92eec5db146467f4 | [
"Apache-2.0"
]
| 3 | 2021-06-03T22:45:47.000Z | 2022-03-27T18:50:06.000Z | download_cifar100_teacher.py | valeoai/QuEST | 02a23d2d8e0d059b4a30433f92eec5db146467f4 | [
"Apache-2.0"
]
| null | null | null | download_cifar100_teacher.py | valeoai/QuEST | 02a23d2d8e0d059b4a30433f92eec5db146467f4 | [
"Apache-2.0"
]
| 1 | 2021-08-20T15:39:40.000Z | 2021-08-20T15:39:40.000Z | import os
import urllib.request
os.makedirs('saved_models', exist_ok=True)
model_path = 'http://shape2prog.csail.mit.edu/repo/wrn_40_2_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/wrn_40_2_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet56_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet56_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet110_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet110_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/resnet32x4_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/resnet32x4_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/vgg13_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/vgg13_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
model_path = 'http://shape2prog.csail.mit.edu/repo/ResNet50_vanilla/ckpt_epoch_240.pth'
model_dir = 'saved_models/ResNet50_vanilla'
os.makedirs(model_dir, exist_ok=True)
urllib.request.urlretrieve(model_path, os.path.join(model_dir, model_path.split('/')[-1]))
print(f"Downloaded {model_path.split('repo/')[-1]} to saved_models/")
| 51.414634 | 91 | 0.766129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,034 | 0.490512 |
6f3bd5a39dfdffc25d3e3bcdbc5be1926e9811b6 | 48 | py | Python | aliyunsdkcore/__init__.py | gikoluo/aliyun-python-sdk-core | 5c4e79ad5f7668af048ae1a18d424c4919131a9c | [
"MIT"
]
| null | null | null | aliyunsdkcore/__init__.py | gikoluo/aliyun-python-sdk-core | 5c4e79ad5f7668af048ae1a18d424c4919131a9c | [
"MIT"
]
| null | null | null | aliyunsdkcore/__init__.py | gikoluo/aliyun-python-sdk-core | 5c4e79ad5f7668af048ae1a18d424c4919131a9c | [
"MIT"
]
| 4 | 2017-07-27T11:27:01.000Z | 2020-09-01T07:49:21.000Z | __author__ = 'alex jiang'
__version__ = '2.3.3'
| 16 | 25 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.395833 |
6f3cd19601af3a6ec8e27fb00bfee8d9af472214 | 95,791 | py | Python | leaderboard/scenarios/background_activity.py | casper-auto/leaderboard | 111a48f9099c08a2f1068ee8aea2ad56ce52ef9d | [
"MIT"
]
| 68 | 2020-03-25T10:04:21.000Z | 2022-03-21T01:03:39.000Z | leaderboard/scenarios/background_activity.py | casper-auto/leaderboard | 111a48f9099c08a2f1068ee8aea2ad56ce52ef9d | [
"MIT"
]
| 32 | 2020-06-16T22:11:05.000Z | 2022-03-24T09:35:48.000Z | leaderboard/scenarios/background_activity.py | casper-auto/leaderboard | 111a48f9099c08a2f1068ee8aea2ad56ce52ef9d | [
"MIT"
]
| 40 | 2020-03-21T23:43:39.000Z | 2022-01-03T14:04:31.000Z | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Scenario spawning elements to make the town dynamic and interesting
"""
import math
from collections import OrderedDict
import py_trees
import numpy as np
import carla
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import AtomicBehavior
from srunner.scenarios.basic_scenario import BasicScenario
DEBUG_COLORS = {
'road': carla.Color(0, 0, 255), # Blue
'opposite': carla.Color(255, 0, 0), # Red
'junction': carla.Color(0, 0, 0), # Black
'entry': carla.Color(255, 255, 0), # Yellow
'exit': carla.Color(0, 255, 255), # Teal
'connect': carla.Color(0, 255, 0), # Green
}
DEBUG_TYPE = {
'small': [0.8, 0.1],
'medium': [0.5, 0.15],
'large': [0.2, 0.2],
}
def draw_string(world, location, string='', debug_type='road', persistent=False):
    """Utility function to draw debugging strings"""
    v_shift, _ = DEBUG_TYPE.get('small')
    l_shift = carla.Location(z=v_shift)
    # Fall back to the 'road' color (not the bare string) if an unknown debug_type is given
    color = DEBUG_COLORS.get(debug_type, DEBUG_COLORS['road'])
    life_time = 0.07 if not persistent else 100000
    world.debug.draw_string(location + l_shift, string, False, color, life_time)
def draw_point(world, location, point_type='small', debug_type='road', persistent=False):
    """Utility function to draw debugging points"""
    # Fall back to the 'small' size (not the bare string) if an unknown point_type is given
    v_shift, size = DEBUG_TYPE.get(point_type, DEBUG_TYPE['small'])
    l_shift = carla.Location(z=v_shift)
    color = DEBUG_COLORS.get(debug_type, DEBUG_COLORS['road'])
    life_time = 0.07 if not persistent else 100000
    world.debug.draw_point(location + l_shift, size, color, life_time)
def get_same_dir_lanes(waypoint):
"""Gets all the lanes with the same direction of the road of a wp"""
same_dir_wps = [waypoint]
# Check roads on the right
right_wp = waypoint
while True:
possible_right_wp = right_wp.get_right_lane()
if possible_right_wp is None or possible_right_wp.lane_type != carla.LaneType.Driving:
break
right_wp = possible_right_wp
same_dir_wps.append(right_wp)
# Check roads on the left
left_wp = waypoint
while True:
possible_left_wp = left_wp.get_left_lane()
if possible_left_wp is None or possible_left_wp.lane_type != carla.LaneType.Driving:
break
if possible_left_wp.lane_id * left_wp.lane_id < 0:
break
left_wp = possible_left_wp
same_dir_wps.append(left_wp)
return same_dir_wps
def get_opposite_dir_lanes(waypoint):
"""Gets all the lanes with opposite direction of the road of a wp"""
other_dir_wps = []
other_dir_wp = None
# Get the first lane of the opposite direction
left_wp = waypoint
while True:
possible_left_wp = left_wp.get_left_lane()
if possible_left_wp is None:
break
if possible_left_wp.lane_id * left_wp.lane_id < 0:
other_dir_wp = possible_left_wp
break
left_wp = possible_left_wp
if not other_dir_wp:
return other_dir_wps
# Check roads on the right
right_wp = other_dir_wp
while True:
if right_wp.lane_type == carla.LaneType.Driving:
other_dir_wps.append(right_wp)
possible_right_wp = right_wp.get_right_lane()
if possible_right_wp is None:
break
right_wp = possible_right_wp
return other_dir_wps
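# Both helpers above rely on the OpenDRIVE convention that lane_id changes sign across the
# road centerline, which is why a sign flip between neighbouring lane ids marks the switch
# to the opposite driving direction.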
def get_lane_key(waypoint):
"""Returns a key corresponding to the waypoint lane. Equivalent to a 'Lane'
object and used to compare waypoint lanes"""
return '' if waypoint is None else get_road_key(waypoint) + '*' + str(waypoint.lane_id)
def get_road_key(waypoint):
"""Returns a key corresponding to the waypoint road. Equivalent to a 'Road'
object and used to compare waypoint roads"""
return '' if waypoint is None else str(waypoint.road_id)
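# Example: a waypoint on road 37, lane -4 maps to the lane key '37*-4' and the road key '37';
# these string keys are what the hardcoded complex-junction pairs further down (e.g.
# ['37*-4', '36*-4']) refer to.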
class Source(object):
"""
Source object to store its position and its responsible actors
"""
def __init__(self, wp, actors, entry_lane_wp=''):
self.wp = wp
self.actors = actors
# For road sources
self.mapped_key = get_lane_key(wp)
# For junction sources
self.entry_lane_wp = entry_lane_wp
self.previous_lane_keys = [] # Source lane and connecting lanes of the previous junction
class Junction(object):
"""
Junction object. Stores its topology as well as its state, when active
"""
def __init__(self, junction, junction_id, route_entry_index=None, route_exit_index=None):
# Topology
self.junctions = [junction]
self.id = junction_id
self.route_entry_index = route_entry_index
self.route_exit_index = route_exit_index
self.exit_road_length = 0
self.route_entry_keys = []
self.route_exit_keys = []
self.opposite_entry_keys = []
self.opposite_exit_keys = []
self.entry_wps = []
self.exit_wps = []
self.entry_directions = {'ref': [], 'opposite': [], 'left': [], 'right': []}
self.exit_directions = {'ref': [], 'opposite': [], 'left': [], 'right': []}
# State
self.entry_sources = []
self.exit_sources = []
self.exit_dict = OrderedDict()
self.actor_dict = OrderedDict()
self.scenario_info = {
'direction': None,
'remove_entries': False,
'remove_middle': False,
'remove_exits': False,
}
def contains(self, other_junction):
"""Checks whether or not a carla.Junction is part of the class"""
other_id = other_junction.id
for junction in self.junctions:
if other_id == junction.id:
return True
return False
class BackgroundActivity(BasicScenario):
"""
Implementation of a scenario to spawn a set of background actors,
and to remove traffic jams in background traffic
This is a single ego vehicle scenario
"""
def __init__(self, world, ego_vehicle, config, route, night_mode=False, debug_mode=False, timeout=0):
"""
Setup all relevant parameters and create scenario
"""
self._map = CarlaDataProvider.get_map()
self.ego_vehicle = ego_vehicle
self.route = route
self.config = config
self._night_mode = night_mode
self.debug = debug_mode
self.timeout = timeout # Timeout of scenario in seconds
super(BackgroundActivity, self).__init__("BackgroundActivity",
[ego_vehicle],
config,
world,
debug_mode,
terminate_on_failure=True,
criteria_enable=True)
def _create_behavior(self):
"""
Basic behavior do nothing, i.e. Idle
"""
# Check if a vehicle is further than X, destroy it if necessary and respawn it
return BackgroundBehavior(self.ego_vehicle, self.route, self._night_mode)
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
in parallel behavior tree.
"""
pass
def __del__(self):
"""
Remove all actors upon deletion
"""
pass
class BackgroundBehavior(AtomicBehavior):
"""
Handles the background activity
"""
def __init__(self, ego_actor, route, night_mode=False, debug=False, name="BackgroundBehavior"):
"""
Setup class members
"""
super(BackgroundBehavior, self).__init__(name)
self.debug = debug
self._map = CarlaDataProvider.get_map()
self._world = CarlaDataProvider.get_world()
timestep = self._world.get_snapshot().timestamp.delta_seconds
self._tm = CarlaDataProvider.get_client().get_trafficmanager(
CarlaDataProvider.get_traffic_manager_port())
self._tm.global_percentage_speed_difference(0.0)
self._night_mode = night_mode
# Global variables
self._ego_actor = ego_actor
self._ego_state = 'road'
self._route_index = 0
self._get_route_data(route)
self._spawn_vertical_shift = 0.2
self._reuse_dist = 10 # When spawning actors, might reuse actors closer to this distance
self._spawn_free_radius = 20 # Sources closer to the ego will not spawn actors
self._fake_junction_ids = []
self._fake_lane_pair_keys = []
# Road variables
self._road_actors = []
self._road_back_actors = {} # Dictionary mapping the actors behind the ego to their lane
self._road_ego_key = None
self._road_extra_front_actors = 0
self._road_sources = []
self._road_checker_index = 0
self._road_ego_key = ""
self._road_front_vehicles = 3 # Amount of vehicles in front of the ego
self._road_back_vehicles = 3 # Amount of vehicles behind the ego
        self._road_vehicle_dist = 8 # Distance road vehicles leave between each other [m]
self._road_spawn_dist = 11 # Initial distance between spawned road vehicles [m]
self._road_new_sources_dist = 20 # Distance of the source to the start of the new lanes
self._radius_increase_ratio = 1.8 # Meters the radius increases per m/s of the ego
        self._extra_radius = 0.0 # Extra distance to keep the road behavior from blocking
self._extra_radius_increase_ratio = 0.5 * timestep # Distance the radius increases per tick (0.5 m/s)
self._max_extra_radius = 10 # Max extra distance
self._base_min_radius = 0
self._base_max_radius = 0
self._min_radius = 0
self._max_radius = 0
self._junction_detection_dist = 0
self._get_road_radius()
# Junction variables
self._junctions = []
self._active_junctions = []
self._junction_sources_dist = 40 # Distance from the entry sources to the junction [m]
        self._junction_vehicle_dist = 8 # Distance junction vehicles leave between each other [m]
self._junction_spawn_dist = 10 # Initial distance between spawned junction vehicles [m]
self._junction_sources_max_actors = 5 # Maximum vehicles alive at the same time per source
# Opposite lane variables
self._opposite_actors = []
self._opposite_sources = []
self._opposite_route_index = 0
self._opposite_removal_dist = 30 # Distance at which actors are destroyed
self._opposite_sources_dist = 60 # Distance from the ego to the opposite sources [m]
        self._opposite_vehicle_dist = 10 # Distance opposite vehicles leave between each other [m]
self._opposite_spawn_dist = 20 # Initial distance between spawned opposite vehicles [m]
self._opposite_sources_max_actors = 8 # Maximum vehicles alive at the same time per source
# Scenario 2 variables
self._is_scenario_2_active = False
self._scenario_2_actors = []
self._activate_break_scenario = False
self._break_duration = 7 # Duration of the scenario
self._next_scenario_time = float('inf')
# Scenario 4 variables
self._is_scenario_4_active = False
self._scenario_4_actors = []
self._ego_exitted_junction = False
self._crossing_dist = None # Distance between the crossing object and the junction exit
self._start_ego_wp = None
# Junction scenario variables
self.scenario_info = {
'direction': None,
'remove_entries': False,
'remove_middle': False,
'remove_exits': False,
} # Same as the Junction.scenario_info, but this stores the data in case no junctions are active
def _get_route_data(self, route):
"""Extract the information from the route"""
self._route = [] # Transform the route into a list of waypoints
self._accum_dist = [] # Save the total traveled distance for each waypoint
prev_trans = None
for trans, _ in route:
self._route.append(self._map.get_waypoint(trans.location))
if prev_trans:
dist = trans.location.distance(prev_trans.location)
self._accum_dist.append(dist + self._accum_dist[-1])
else:
self._accum_dist.append(0)
prev_trans = trans
self._route_length = len(route)
self._route_index = 0
self._route_buffer = 3
def _get_road_radius(self):
"""
        Computes the min and max radius of the road behavior, which will determine the speed of the vehicles.
        Vehicles closer than the min radius maintain full speed, while those further than the max radius are
        stopped. Between the two, the velocity decreases linearly"""
self._base_min_radius = (self._road_front_vehicles + self._road_extra_front_actors) * self._road_spawn_dist
self._base_max_radius = (self._road_front_vehicles + self._road_extra_front_actors + 1) * self._road_spawn_dist
self._min_radius = self._base_min_radius
self._max_radius = self._base_max_radius
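        # With the default values above (3 front vehicles, 0 extra front actors, 11 m spawn
        # distance) this gives a 33 m full-speed radius and a 44 m stopping radius.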
def initialise(self):
"""Creates the background activity actors. Pressuposes that the ego is at a road"""
self._create_junction_dict()
ego_wp = self._route[0]
self._road_ego_key = get_lane_key(ego_wp)
same_dir_wps = get_same_dir_lanes(ego_wp)
self._initialise_road_behavior(same_dir_wps, ego_wp)
self._initialise_opposite_sources()
self._initialise_road_checker()
def update(self):
new_status = py_trees.common.Status.RUNNING
prev_ego_index = self._route_index
# Check if the TM destroyed an actor
if self._route_index > 0:
self._check_background_actors()
# Get ego's odometry. For robustness, the closest route point will be used
location = CarlaDataProvider.get_location(self._ego_actor)
ego_wp = self._update_ego_route_location(location)
ego_transform = ego_wp.transform
if self.debug:
string = 'EGO_' + self._ego_state[0].upper()
draw_string(self._world, location, string, self._ego_state, False)
# Parameters and scenarios
self._update_parameters()
self._manage_break_scenario()
# Update ego state
if self._ego_state == 'junction':
self._monitor_ego_junction_exit(ego_wp)
self._monitor_nearby_junctions()
# Update_actors
if self._ego_state == 'junction':
self._monitor_ego_junction_exit(ego_wp)
self._update_junction_actors()
self._update_junction_sources()
else:
self._update_road_actors(prev_ego_index, self._route_index)
self._move_road_checker(prev_ego_index, self._route_index)
self._move_opposite_sources(prev_ego_index, self._route_index)
self._update_opposite_sources()
# Update non junction sources
self._update_opposite_actors(ego_transform)
self._update_road_sources(ego_transform.location)
self._monitor_scenario_4_end(ego_transform.location)
return new_status
def terminate(self, new_status):
"""Destroy all actors"""
all_actors = self._get_actors()
for actor in list(all_actors):
self._destroy_actor(actor)
super(BackgroundBehavior, self).terminate(new_status)
def _get_actors(self):
"""Returns a list of all actors part of the background activity"""
actors = self._road_actors + self._opposite_actors
for junction in self._active_junctions:
actors.extend(list(junction.actor_dict))
return actors
def _check_background_actors(self):
"""Checks if the Traffic Manager has removed a backgroudn actor"""
background_actors = self._get_actors()
alive_ids = [actor.id for actor in self._world.get_actors().filter('vehicle*')]
for actor in background_actors:
if actor.id not in alive_ids:
self._remove_actor_info(actor)
################################
## Junction cache ##
################################
def _create_junction_dict(self):
"""Extracts the junctions the ego vehicle will pass through."""
data = self._get_junctions_data()
fake_data, filtered_data = self._filter_fake_junctions(data)
self._get_fake_lane_pairs(fake_data)
route_data = self._join_complex_junctions(filtered_data)
self._add_junctions_topology(route_data)
self._junctions = route_data
def _get_junctions_data(self):
"""Gets all the junctions the ego passes through"""
junction_data = []
junction_num = 0
start_index = 0
# Ignore the junction the ego spawns at
for i in range(0, self._route_length - 1):
if not self._is_junction(self._route[i]):
start_index = i
break
for i in range(start_index, self._route_length - 1):
next_wp = self._route[i+1]
prev_junction = junction_data[-1] if len(junction_data) > 0 else None
# Searching for the junction exit
if prev_junction and prev_junction.route_exit_index is None:
if not self._is_junction(next_wp) or next_wp.get_junction().id != junction_id:
prev_junction.route_exit_index = i+1
# Searching for a junction
elif self._is_junction(next_wp):
junction_id = next_wp.get_junction().id
if prev_junction:
start_dist = self._accum_dist[i]
prev_end_dist = self._accum_dist[prev_junction.route_exit_index]
prev_junction.exit_road_length = start_dist - prev_end_dist
# Same junction as the prev one and closer than 2 meters
if prev_junction and prev_junction.junctions[-1].id == junction_id:
start_dist = self._accum_dist[i]
prev_end_dist = self._accum_dist[prev_junction.route_exit_index]
distance = start_dist - prev_end_dist
if distance < 2:
prev_junction.junctions.append(next_wp.get_junction())
prev_junction.route_exit_index = None
continue
junction_data.append(Junction(next_wp.get_junction(), junction_num, i))
junction_num += 1
if len(junction_data) > 0:
road_end_dist = self._accum_dist[self._route_length - 1]
if junction_data[-1].route_exit_index:
route_start_dist = self._accum_dist[junction_data[-1].route_exit_index]
else:
route_start_dist = self._accum_dist[self._route_length - 1]
junction_data[-1].exit_road_length = road_end_dist - route_start_dist
return junction_data
def _filter_fake_junctions(self, data):
"""
        Filters out fake junctions. As a general note, a fake junction is one where no road lane divides in two.
        However, this check might fail for some CARLA maps, so junctions where all lanes go straight are also checked.
        """
fake_data = []
filtered_data = []
threshold = math.radians(15)
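        # The forward vectors below are unit vectors, so 'dot < cos(threshold)' means the exit
        # heading deviates from the entry heading by more than 15º, i.e. the lane actually turns.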
for junction_data in data:
used_entry_lanes = []
used_exit_lanes = []
for junction in junction_data.junctions:
for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving):
entry_wp = self._get_junction_entry_wp(entry_wp)
if not entry_wp:
continue
if get_lane_key(entry_wp) not in used_entry_lanes:
used_entry_lanes.append(get_lane_key(entry_wp))
exit_wp = self._get_junction_exit_wp(exit_wp)
if not exit_wp:
continue
if get_lane_key(exit_wp) not in used_exit_lanes:
used_exit_lanes.append(get_lane_key(exit_wp))
if not used_entry_lanes and not used_exit_lanes:
fake_data.append(junction_data)
continue
found_turn = False
for entry_wp, exit_wp in junction_data.junctions[0].get_waypoints(carla.LaneType.Driving):
entry_heading = entry_wp.transform.get_forward_vector()
exit_heading = exit_wp.transform.get_forward_vector()
dot = entry_heading.x * exit_heading.x + entry_heading.y * exit_heading.y
if dot < math.cos(threshold):
found_turn = True
break
if not found_turn:
fake_data.append(junction_data)
else:
filtered_data.append(junction_data)
return fake_data, filtered_data
def _get_complex_junctions(self):
"""
Function to hardcode the topology of some complex junctions. This is done for the roundabouts,
as the current API doesn't offer that info as well as others such as the gas station at Town04.
If there are micro lanes between connected junctions, add them to the fake_lane_keys, connecting
them when their topology is calculated
"""
complex_junctions = []
fake_lane_keys = []
if 'Town03' in self._map.name:
# Roundabout, take it all as one
complex_junctions.append([
self._map.get_waypoint_xodr(1100, -5, 16.6).get_junction(),
self._map.get_waypoint_xodr(1624, -5, 25.3).get_junction(),
self._map.get_waypoint_xodr(1655, -5, 8.3).get_junction(),
self._map.get_waypoint_xodr(1772, 3, 16.2).get_junction(),
self._map.get_waypoint_xodr(1206, -5, 5.9).get_junction()])
fake_lane_keys.extend([
['37*-4', '36*-4'], ['36*-4', '37*-4'],
['37*-5', '36*-5'], ['36*-5', '37*-5'],
['38*-4', '12*-4'], ['12*-4', '38*-4'],
['38*-5', '12*-5'], ['12*-5', '38*-5']])
# Gas station
complex_junctions.append([
self._map.get_waypoint_xodr(1031, -1, 11.3).get_junction(),
self._map.get_waypoint_xodr(100, -1, 18.8).get_junction(),
self._map.get_waypoint_xodr(1959, -1, 22.7).get_junction()])
fake_lane_keys.extend([
['32*-2', '33*-2'], ['33*-2', '32*-2'],
['32*-1', '33*-1'], ['33*-1', '32*-1'],
['32*4', '33*4'], ['33*4', '32*4'],
['32*5', '33*5'], ['33*5', '32*5']])
elif 'Town04' in self._map.name:
# Gas station
complex_junctions.append([
self._map.get_waypoint_xodr(518, -1, 8.1).get_junction(),
self._map.get_waypoint_xodr(886, 1, 10.11).get_junction(),
self._map.get_waypoint_xodr(467, 1, 25.8).get_junction()])
self._fake_lane_pair_keys.extend(fake_lane_keys)
return complex_junctions
def _join_complex_junctions(self, filtered_data):
"""
Joins complex junctions into one. This makes it such that all the junctions,
as well as their connecting lanes, are treated as the same junction
"""
route_data = []
prev_index = -1
# If entering a complex, add all its junctions to the list
for junction_data in filtered_data:
junction = junction_data.junctions[0]
prev_junction = route_data[-1] if len(route_data) > 0 else None
complex_junctions = self._get_complex_junctions()
# Get the complex index
current_index = -1
            for i, complex_group in enumerate(complex_junctions):
                complex_ids = [j.id for j in complex_group]
                if junction.id in complex_ids:
                    current_index = i
                    break
            if current_index == -1:
                # Outside a complex, add it
                route_data.append(junction_data)
            elif current_index == prev_index:
                # Same complex as the previous junction
                prev_junction.route_exit_index = junction_data.route_exit_index
            else:
                # New complex, add it
                junction_ids = [j.id for j in junction_data.junctions]
                for complex_junction in complex_junctions[current_index]:
                    if complex_junction.id not in junction_ids:
                        junction_data.junctions.append(complex_junction)
route_data.append(junction_data)
prev_index = current_index
return route_data
def _get_fake_lane_pairs(self, fake_data):
"""Gets a list of entry-exit lanes of the fake junctions"""
for fake_junctions_data in fake_data:
for junction in fake_junctions_data.junctions:
for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving):
while self._is_junction(entry_wp):
entry_wps = entry_wp.previous(0.5)
if len(entry_wps) == 0:
break # Stop when there's no prev
entry_wp = entry_wps[0]
if self._is_junction(entry_wp):
continue # Triggered by the loops break
while self._is_junction(exit_wp):
exit_wps = exit_wp.next(0.5)
if len(exit_wps) == 0:
                            break # Stop when there's no next
exit_wp = exit_wps[0]
if self._is_junction(exit_wp):
continue # Triggered by the loops break
self._fake_junction_ids.append(junction.id)
self._fake_lane_pair_keys.append([get_lane_key(entry_wp), get_lane_key(exit_wp)])
def _get_junction_entry_wp(self, entry_wp):
"""For a junction waypoint, returns a waypoint outside of it that entrys into its lane"""
# Exit the junction
while self._is_junction(entry_wp):
entry_wps = entry_wp.previous(0.2)
if len(entry_wps) == 0:
return None # Stop when there's no prev
entry_wp = entry_wps[0]
return entry_wp
def _get_junction_exit_wp(self, exit_wp):
"""For a junction waypoint, returns a waypoint outside of it from which the lane exits the junction"""
while self._is_junction(exit_wp):
exit_wps = exit_wp.next(0.2)
if len(exit_wps) == 0:
                return None # Stop when there's no next
exit_wp = exit_wps[0]
return exit_wp
def _get_closest_junction_waypoint(self, waypoint, junction_wps):
"""
Matches a given wp to another one inside the list.
This is first done by checking its key, and if this fails, the closest wp is chosen
"""
# Check the lane keys
junction_keys = [get_lane_key(waypoint_) for waypoint_ in junction_wps]
if get_lane_key(waypoint) in junction_keys:
return waypoint
# Get the closest one
closest_dist = float('inf')
closest_junction_wp = None
route_location = waypoint.transform.location
for junction_wp in junction_wps:
distance = junction_wp.transform.location.distance(route_location)
if distance < closest_dist:
closest_dist = distance
closest_junction_wp = junction_wp
return closest_junction_wp
def _is_route_wp_behind_junction_wp(self, route_wp, junction_wp):
"""Checks if an actor is behind the ego. Uses the route transform"""
route_location = route_wp.transform.location
junction_transform = junction_wp.transform
junction_heading = junction_transform.get_forward_vector()
wps_vec = route_location - junction_transform.location
if junction_heading.x * wps_vec.x + junction_heading.y * wps_vec.y < - 0.09: # 85º
return True
return False
def _add_junctions_topology(self, route_data):
"""Gets the entering and exiting lanes of a multijunction"""
for junction_data in route_data:
used_entry_lanes = []
used_exit_lanes = []
entry_lane_wps = []
exit_lane_wps = []
if self.debug:
print(' --------------------- ')
for junction in junction_data.junctions:
for entry_wp, exit_wp in junction.get_waypoints(carla.LaneType.Driving):
entry_wp = self._get_junction_entry_wp(entry_wp)
if not entry_wp:
continue
if get_lane_key(entry_wp) not in used_entry_lanes:
used_entry_lanes.append(get_lane_key(entry_wp))
entry_lane_wps.append(entry_wp)
if self.debug:
draw_point(self._world, entry_wp.transform.location, 'small', 'entry', True)
exit_wp = self._get_junction_exit_wp(exit_wp)
if not exit_wp:
continue
if get_lane_key(exit_wp) not in used_exit_lanes:
used_exit_lanes.append(get_lane_key(exit_wp))
exit_lane_wps.append(exit_wp)
if self.debug:
draw_point(self._world, exit_wp.transform.location, 'small', 'exit', True)
# Check for connecting lanes. This is pretty much for the roundabouts, but some weird geometries
# make it possible for single junctions to have the same road entering and exiting. Two cases,
            # Lanes that exit one junction and enter another (or vice versa)
exit_lane_keys = [get_lane_key(wp) for wp in exit_lane_wps]
entry_lane_keys = [get_lane_key(wp) for wp in entry_lane_wps]
for wp in list(entry_lane_wps):
if get_lane_key(wp) in exit_lane_keys:
entry_lane_wps.remove(wp)
if self.debug:
draw_point(self._world, wp.transform.location, 'small', 'connect', True)
for wp in list(exit_lane_wps):
if get_lane_key(wp) in entry_lane_keys:
exit_lane_wps.remove(wp)
if self.debug:
draw_point(self._world, wp.transform.location, 'small', 'connect', True)
            # Lanes with a fake junction in the middle (maps junction exit to fake junction entry and vice versa)
for entry_key, exit_key in self._fake_lane_pair_keys:
entry_wp = None
for wp in entry_lane_wps:
if get_lane_key(wp) == exit_key: # A junction exit is a fake junction entry
entry_wp = wp
break
exit_wp = None
for wp in exit_lane_wps:
if get_lane_key(wp) == entry_key: # A junction entry is a fake junction exit
exit_wp = wp
break
if entry_wp and exit_wp:
entry_lane_wps.remove(entry_wp)
exit_lane_wps.remove(exit_wp)
if self.debug:
draw_point(self._world, entry_wp.transform.location, 'small', 'connect', True)
draw_point(self._world, exit_wp.transform.location, 'small', 'connect', True)
junction_data.entry_wps = entry_lane_wps
junction_data.exit_wps = exit_lane_wps
# Filter the entries and exits that correspond to the route
route_entry_wp = self._route[junction_data.route_entry_index]
# Junction entry
for wp in get_same_dir_lanes(route_entry_wp):
junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps)
junction_data.route_entry_keys.append(get_lane_key(junction_wp))
for wp in get_opposite_dir_lanes(route_entry_wp):
junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps)
junction_data.opposite_exit_keys.append(get_lane_key(junction_wp))
# Junction exit
if junction_data.route_exit_index: # Can be None if route ends at a junction
route_exit_wp = self._route[junction_data.route_exit_index]
for wp in get_same_dir_lanes(route_exit_wp):
junction_wp = self._get_closest_junction_waypoint(wp, exit_lane_wps)
junction_data.route_exit_keys.append(get_lane_key(junction_wp))
for wp in get_opposite_dir_lanes(route_exit_wp):
junction_wp = self._get_closest_junction_waypoint(wp, entry_lane_wps)
junction_data.opposite_entry_keys.append(get_lane_key(junction_wp))
# Add the entry directions of each lane with respect to the route. Used for scenarios 7 to 9
route_entry_yaw = route_entry_wp.transform.rotation.yaw
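            # Bin each entry lane by its yaw relative to the route entry: (30º, 135º] -> 'left', (135º, 225º] -> 'opposite', (225º, 330º] -> 'right', otherwise 'ref'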
for wp in entry_lane_wps:
diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360
if diff > 330.0:
direction = 'ref'
elif diff > 225.0:
direction = 'right'
elif diff > 135.0:
direction = 'opposite'
elif diff > 30.0:
direction = 'left'
else:
direction = 'ref'
junction_data.entry_directions[direction].append(get_lane_key(wp))
# Supposing scenario vehicles go straight, these correspond to the exit lanes of the entry directions
for wp in exit_lane_wps:
diff = (wp.transform.rotation.yaw - route_entry_yaw) % 360
if diff > 330.0:
direction = 'ref'
elif diff > 225.0:
direction = 'right'
elif diff > 135.0:
direction = 'opposite'
elif diff > 30.0:
direction = 'left'
else:
direction = 'ref'
junction_data.exit_directions[direction].append(get_lane_key(wp))
if self.debug:
exit_lane = self._route[junction_data.route_exit_index] if junction_data.route_exit_index else None
print('> R Entry Lane: {}'.format(get_lane_key(self._route[junction_data.route_entry_index])))
print('> R Exit Lane: {}'.format(get_lane_key(exit_lane)))
entry_print = '> J Entry Lanes: '
for entry_wp in entry_lane_wps:
key = get_lane_key(entry_wp)
entry_print += key + ' ' * (6 - len(key))
print(entry_print)
exit_print = '> J Exit Lanes: '
for exit_wp in exit_lane_wps:
key = get_lane_key(exit_wp)
exit_print += key + ' ' * (6 - len(key))
print(exit_print)
route_entry = '> R-J Entry Lanes: '
for entry_key in junction_data.route_entry_keys:
route_entry += entry_key + ' ' * (6 - len(entry_key))
print(route_entry)
route_exit = '> R-J Route Exit Lanes: '
for exit_key in junction_data.route_exit_keys:
route_exit += exit_key + ' ' * (6 - len(exit_key))
print(route_exit)
route_oppo_entry = '> R-J Oppo Entry Lanes: '
for oppo_entry_key in junction_data.opposite_entry_keys:
route_oppo_entry += oppo_entry_key + ' ' * (6 - len(oppo_entry_key))
print(route_oppo_entry)
route_oppo_exit = '> R-J Oppo Exit Lanes: '
for oppo_exit_key in junction_data.opposite_exit_keys:
route_oppo_exit += oppo_exit_key + ' ' * (6 - len(oppo_exit_key))
print(route_oppo_exit)
################################
## Waypoint related functions ##
################################
def _is_junction(self, waypoint):
if not waypoint.is_junction or waypoint.junction_id in self._fake_junction_ids:
return False
return True
################################
## Mode functions ##
################################
def _add_actor_dict_element(self, actor_dict, actor, exit_lane_key='', at_oppo_entry_lane=False):
"""Adds a new actor to the actor dictionary"""
actor_dict[actor] = {
'state': 'junction_entry' if not exit_lane_key else 'junction_exit',
'exit_lane_key': exit_lane_key,
'at_oppo_entry_lane': at_oppo_entry_lane
}
def _switch_to_junction_mode(self, junction):
"""Prepares the junction mode, changing the state of the actors"""
self._ego_state = 'junction'
for actor in list(self._road_actors):
self._add_actor_dict_element(junction.actor_dict, actor)
self._road_actors.remove(actor)
if not self._is_scenario_2_active:
self._tm.vehicle_percentage_speed_difference(actor, 0)
self._road_back_actors.clear()
self._road_extra_front_actors = 0
self._opposite_sources.clear()
def _initialise_junction_scenario(self, direction, remove_entries, remove_exits, remove_middle):
"""
Removes all vehicles in a particular 'direction' as well as all actors inside the junction.
Additionally, activates some flags to ensure the junction is empty at all times
"""
if self._active_junctions:
scenario_junction = self._active_junctions[0]
scenario_junction.scenario_info = {
'direction': direction,
'remove_entries': remove_entries,
'remove_middle': remove_middle,
'remove_exits': remove_exits,
}
entry_direction_keys = scenario_junction.entry_directions[direction]
actor_dict = scenario_junction.actor_dict
if remove_entries:
for entry_source in scenario_junction.entry_sources:
if get_lane_key(entry_source.entry_lane_wp) in entry_direction_keys:
# Source is affected
actors = entry_source.actors
for actor in list(actors):
if actor_dict[actor]['state'] == 'junction_entry':
# Actor is at the entry lane
self._destroy_actor(actor)
if remove_exits:
for exit_dir in scenario_junction.exit_directions[direction]:
for actor in list(scenario_junction.exit_dict[exit_dir]['actors']):
self._destroy_actor(actor)
if remove_middle:
actor_dict = scenario_junction.actor_dict
for actor in list(actor_dict):
if actor_dict[actor]['state'] == 'junction_middle':
self._destroy_actor(actor)
def _handle_junction_scenario_end(self, junction):
"""Ends the junction scenario interaction. This is pretty much useless as the junction
scenario ends at the same time as the active junction, but in the future it might not"""
junction.scenario_info = {
'direction': None,
'remove_entries': False,
'remove_middle': False,
'remove_exits': False,
}
def _monitor_scenario_4_end(self, ego_location):
"""Monitors the ego distance to the junction to know if the scenario 4 has ended"""
if self._ego_exitted_junction:
ref_location = self._start_ego_wp.transform.location
if ego_location.distance(ref_location) > self._crossing_dist:
for actor in self._scenario_4_actors:
self._tm.vehicle_percentage_speed_difference(actor, 0)
self._is_scenario_4_active = False
self._scenario_4_actors.clear()
self._ego_exitted_junction = False
self._crossing_dist = None
def _handle_scenario_4_interaction(self, junction, ego_wp):
"""
        Handles the interaction between the scenario 4 of the Leaderboard and the background activity.
        This removes all vehicles near the bicycle path, and stops the others so that they don't interfere
"""
if not self._is_scenario_4_active:
return
self._ego_exitted_junction = True
self._start_ego_wp = ego_wp
min_crossing_space = 2
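        # Extra clearance (in meters) required around the crossing point, on top of the actor's length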
        # Actors exiting the junction
exit_dict = junction.exit_dict
for exit_key in exit_dict:
if exit_key not in junction.route_exit_keys:
continue
actors = exit_dict[exit_key]['actors']
exit_lane_wp = exit_dict[exit_key]['ref_wp']
exit_lane_location = exit_lane_wp.transform.location
for actor in list(actors):
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist
actor_length = actor.bounding_box.extent.x
if abs(dist_to_scenario) < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue
if dist_to_scenario > 0:
continue # Don't stop the actors that have already passed the scenario
if get_lane_key(ego_wp) == get_lane_key(exit_lane_wp):
self._destroy_actor(actor)
continue # Actor at the ego lane and between the ego and scenario
self._scenario_4_actors.append(actor)
# Actor entering the junction
for entry_source in junction.entry_sources:
entry_lane_wp = entry_source.entry_lane_wp
if get_lane_key(entry_lane_wp) in junction.opposite_entry_keys:
# Source is affected
actors = entry_source.actors
entry_lane_location = entry_lane_wp.transform.location
for actor in list(actors):
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
crossing_space = abs(entry_lane_location.distance(actor_location) - self._crossing_dist)
actor_length = actor.bounding_box.extent.x
if crossing_space < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue # Actors blocking the path of the crossing obstacle
self._scenario_4_actors.append(actor)
# Actors entering the next junction
if len(self._active_junctions) > 1:
next_junction = self._active_junctions[1]
actors_dict = next_junction.actor_dict
for actor in list(actors_dict):
if actors_dict[actor]['state'] != 'junction_entry':
continue
actor_location = CarlaDataProvider.get_location(actor)
if not actor_location:
self._destroy_actor(actor)
continue
dist_to_scenario = exit_lane_location.distance(actor_location) - self._crossing_dist
actor_length = actor.bounding_box.extent.x
if abs(dist_to_scenario) < actor_length + min_crossing_space:
self._destroy_actor(actor)
continue
if dist_to_scenario > 0:
continue # Don't stop the actors that have already passed the scenario
actor_wp = self._map.get_waypoint(actor_location)
if get_lane_key(ego_wp) == get_lane_key(actor_wp):
self._destroy_actor(actor)
continue # Actor at the ego lane and between the ego and scenario
self._scenario_4_actors.append(actor)
# Immediately freeze the actors
for actor in self._scenario_4_actors:
try:
actor.set_target_velocity(carla.Vector3D(0, 0, 0))
self._tm.vehicle_percentage_speed_difference(actor, 100)
except RuntimeError:
pass # Just in case the actor is not alive
def _end_junction_behavior(self, ego_wp, junction):
"""
Destroys unneeded actors (those behind the ego), moves the rest to other data structures
and cleans up the variables. If no other junctions are active, starts road mode
"""
actor_dict = junction.actor_dict
route_exit_keys = junction.route_exit_keys
self._active_junctions.pop(0)
for actor in list(actor_dict):
location = CarlaDataProvider.get_location(actor)
if not location or self._is_location_behind_ego(location):
self._destroy_actor(actor)
continue
self._tm.vehicle_percentage_speed_difference(actor, 0)
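            # Actors that entered from a lane opposite to the route become opposite actors and ignore lights / signs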
if actor_dict[actor]['at_oppo_entry_lane']:
self._opposite_actors.append(actor)
self._tm.ignore_lights_percentage(actor, 100)
self._tm.ignore_signs_percentage(actor, 100)
continue
if not self._active_junctions and actor_dict[actor]['exit_lane_key'] in route_exit_keys:
self._road_actors.append(actor)
continue
self._destroy_actor(actor)
self._handle_scenario_4_interaction(junction, ego_wp)
self._handle_junction_scenario_end(junction)
self._switch_junction_road_sources(junction)
if not self._active_junctions:
self._ego_state = 'road'
self._initialise_opposite_sources()
self._initialise_road_checker()
self._road_ego_key = self._get_ego_route_lane_key(ego_wp)
for source in junction.exit_sources:
self._road_back_actors[source.mapped_key] = []
def _switch_junction_road_sources(self, junction):
"""
        Removes the sources part of the previous road and gets the ones of the exited junction.
"""
self._road_sources.clear()
new_sources = junction.exit_sources
self._road_sources.extend(new_sources)
def _search_for_next_junction(self):
"""Check if closeby to a junction. The closest one will always be the first"""
if not self._junctions:
return None
ego_accum_dist = self._accum_dist[self._route_index]
junction_accum_dist = self._accum_dist[self._junctions[0].route_entry_index]
if junction_accum_dist - ego_accum_dist < self._junction_detection_dist: # Junctions closeby
return self._junctions.pop(0)
return None
def _initialise_connecting_lanes(self, junction):
"""
Moves the actors currently at the exit lane of the last junction
to entry actors of the newly created junction
"""
if len(self._active_junctions) > 0:
prev_junction = self._active_junctions[-1]
route_exit_keys = prev_junction.route_exit_keys
exit_dict = prev_junction.exit_dict
for exit_key in route_exit_keys:
exit_actors = exit_dict[exit_key]['actors']
for actor in list(exit_actors):
self._remove_actor_info(actor)
self._add_actor_dict_element(junction.actor_dict, actor)
self._tm.vehicle_percentage_speed_difference(actor, 0)
def _monitor_nearby_junctions(self):
"""
Monitors when the ego approaches a junction, preparing the junction mode when it happens.
This can be triggered even if there is another junction behavior happening
"""
junction = self._search_for_next_junction()
if not junction:
return
if self._ego_state == 'road':
self._switch_to_junction_mode(junction)
self._initialise_junction_sources(junction)
self._initialise_junction_exits(junction)
self._initialise_connecting_lanes(junction)
self._active_junctions.append(junction)
def _monitor_ego_junction_exit(self, ego_wp):
"""
Monitors when the ego exits the junctions, preparing the road mode when that happens
"""
current_junction = self._active_junctions[0]
exit_index = current_junction.route_exit_index
if exit_index and self._route_index >= exit_index:
self._end_junction_behavior(ego_wp, current_junction)
def _add_incoming_actors(self, junction, source):
"""Checks nearby actors that will pass through the source, adding them to it"""
source_location = source.wp.transform.location
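        # Lazily compute the lane keys of the waypoints leading into the source (plus the source's own lane)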
if not source.previous_lane_keys:
source.previous_lane_keys = [get_lane_key(prev_wp) for prev_wp in source.wp.previous(self._reuse_dist)]
source.previous_lane_keys.append(get_lane_key(source.wp))
for actor in self._get_actors():
if actor in source.actors:
continue # Don't use actors already part of the source
actor_location = CarlaDataProvider.get_location(actor)
if actor_location is None:
continue # No idea where the actor is, ignore it
if source_location.distance(actor_location) > self._reuse_dist:
continue # Don't use actors far away
actor_wp = self._map.get_waypoint(actor_location)
if get_lane_key(actor_wp) not in source.previous_lane_keys:
continue # Don't use actors that won't pass through the source
self._tm.vehicle_percentage_speed_difference(actor, 0)
self._remove_actor_info(actor)
source.actors.append(actor)
at_oppo_entry_lane = get_lane_key(source.entry_lane_wp) in junction.opposite_entry_keys
self._add_actor_dict_element(junction.actor_dict, actor, at_oppo_entry_lane=at_oppo_entry_lane)
return actor
def _update_road_sources(self, ego_location):
"""
Manages the sources that spawn actors behind the ego.
Sources are destroyed after their actors are spawned
"""
for source in list(self._road_sources):
if self.debug:
draw_point(self._world, source.wp.transform.location, 'small', self._ego_state, False)
draw_string(self._world, source.wp.transform.location, str(len(source.actors)), self._ego_state, False)
if len(source.actors) >= self._road_back_vehicles:
self._road_sources.remove(source)
continue
if len(source.actors) == 0:
location = ego_location
else:
location = CarlaDataProvider.get_location(source.actors[-1])
if not location:
continue
distance = location.distance(source.wp.transform.location)
# Spawn a new actor if the last one is far enough
if distance > self._road_spawn_dist:
actor = self._spawn_source_actor(source, ego_dist=self._road_vehicle_dist)
if actor is None:
continue
self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist)
source.actors.append(actor)
if self._ego_state == 'road':
self._road_actors.append(actor)
if source.mapped_key in self._road_back_actors:
self._road_back_actors[source.mapped_key].append(actor)
elif self._ego_state == 'junction':
self._add_actor_dict_element(self._active_junctions[0].actor_dict, actor)
################################
## Behavior related functions ##
################################
def _initialise_road_behavior(self, road_wps, ego_wp):
"""Initialises the road behavior, consisting on several vehicle in front of the ego,
and several on the back. The ones on the back are spawned only outside junctions,
and if not enough are spawned, sources are created that will do so later on"""
spawn_wps = []
# Vehicles in front
for wp in road_wps:
next_wp = wp
for _ in range(self._road_front_vehicles):
next_wps = next_wp.next(self._road_spawn_dist)
if len(next_wps) != 1 or self._is_junction(next_wps[0]):
                    break # Stop when there's no next or a junction is found
next_wp = next_wps[0]
spawn_wps.append(next_wp)
for actor in self._spawn_actors(spawn_wps):
self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist)
self._road_actors.append(actor)
# Vehicles on the side
for wp in road_wps:
self._road_back_actors[get_lane_key(wp)] = []
if wp.lane_id == ego_wp.lane_id:
continue
actor = self._spawn_actors([wp])[0]
self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist)
self._road_actors.append(actor)
self._road_back_actors[get_lane_key(wp)].append(actor)
# Vehicles behind
for wp in road_wps:
spawn_wps = []
prev_wp = wp
for _ in range(self._road_back_vehicles):
prev_wps = prev_wp.previous(self._road_spawn_dist)
if len(prev_wps) != 1 or self._is_junction(prev_wps[0]):
                    break # Stop when there's no previous or a junction is found
prev_wp = prev_wps[0]
spawn_wps.append(prev_wp)
actors = self._spawn_actors(spawn_wps)
for actor in actors:
self._tm.distance_to_leading_vehicle(actor, self._road_vehicle_dist)
self._road_actors.append(actor)
self._road_back_actors[get_lane_key(wp)].append(actor)
            # If not enough were spawned, create actor sources behind the ego
if len(actors) < self._road_back_vehicles:
self._road_sources.append(Source(prev_wp, actors))
def _initialise_opposite_sources(self):
"""
Gets the waypoints where the actor sources that spawn actors in the opposite direction
will be located. These are at a fixed distance from the ego, but never entering junctions
"""
self._opposite_route_index = None
if not self._junctions:
next_junction_index = self._route_length - 1
else:
next_junction_index = self._junctions[0].route_entry_index
ego_accum_dist = self._accum_dist[self._route_index]
for i in range(self._route_index, next_junction_index):
if self._accum_dist[i] - ego_accum_dist > self._opposite_sources_dist:
self._opposite_route_index = i
break
if not self._opposite_route_index:
# Junction is closer than the opposite source distance
self._opposite_route_index = next_junction_index
oppo_wp = self._route[self._opposite_route_index]
for wp in get_opposite_dir_lanes(oppo_wp):
self._opposite_sources.append(Source(wp, []))
def _initialise_road_checker(self):
"""
Gets the waypoints in front of the ego to continuously check if the road changes
"""
self._road_checker_index = None
if not self._junctions:
upper_limit = self._route_length - 1
else:
upper_limit = self._junctions[0].route_entry_index
ego_accum_dist = self._accum_dist[self._route_index]
for i in range(self._route_index, upper_limit):
if self._accum_dist[i] - ego_accum_dist > self._max_radius:
self._road_checker_index = i
break
if not self._road_checker_index:
self._road_checker_index = upper_limit
def _initialise_junction_sources(self, junction):
"""
Initializes the actor sources to ensure the junction is always populated. They are
        placed at a certain distance from the junction, but are stopped if another junction is found,
to ensure the spawned actors always move towards the activated one
"""
remove_entries = junction.scenario_info['remove_entries']
direction = junction.scenario_info['direction']
entry_lanes = [] if not direction else junction.entry_directions[direction]
for wp in junction.entry_wps:
entry_lane_key = get_lane_key(wp)
if entry_lane_key in junction.route_entry_keys:
continue # Ignore the road from which the route enters
if remove_entries and entry_lane_key in entry_lanes:
continue # Ignore entries that are part of active junction scenarios
moved_dist = 0
prev_wp = wp
while moved_dist < self._junction_sources_dist:
prev_wps = prev_wp.previous(5)
if len(prev_wps) == 0 or self._is_junction(prev_wps[0]):
break
prev_wp = prev_wps[0]
moved_dist += 5
junction.entry_sources.append(Source(prev_wp, [], entry_lane_wp=wp))
def _initialise_junction_exits(self, junction):
"""
Computes and stores the max capacity of the exit. Prepares the behavior of the next road
by creating actors at the route exit, and the sources that'll create actors behind the ego
"""
exit_wps = junction.exit_wps
route_exit_keys = junction.route_exit_keys
remove_exits = junction.scenario_info['remove_exits']
direction = junction.scenario_info['direction']
exit_lanes = [] if not direction else junction.exit_directions[direction]
for wp in exit_wps:
max_actors = 0
max_distance = 0
exiting_wps = []
next_wp = wp
for i in range(max(self._road_front_vehicles, 1)):
# Get the moving distance (first jump is higher to allow space for another vehicle)
if i == 0:
move_dist = 2 * self._junction_spawn_dist
else:
move_dist = self._junction_spawn_dist
# And move such distance
next_wps = next_wp.next(move_dist)
if len(next_wps) == 0:
break # Stop when there's no next
next_wp = next_wps[0]
if max_actors > 0 and self._is_junction(next_wp):
break # Stop when a junction is found
max_actors += 1
max_distance += move_dist
exiting_wps.insert(0, next_wp)
junction.exit_dict[get_lane_key(wp)] = {
'actors': [], 'max_actors': max_actors, 'ref_wp': wp, 'max_distance': max_distance
}
exit_lane_key = get_lane_key(wp)
if remove_exits and exit_lane_key in exit_lanes:
continue # The direction is prohibited as a junction scenario is active
if exit_lane_key in route_exit_keys:
junction.exit_sources.append(Source(wp, []))
actors = self._spawn_actors(exiting_wps)
for actor in actors:
self._tm.distance_to_leading_vehicle(actor, self._junction_vehicle_dist)
self._add_actor_dict_element(junction.actor_dict, actor, exit_lane_key=exit_lane_key)
junction.exit_dict[exit_lane_key]['actors'] = actors
def _update_junction_sources(self):
"""Checks the actor sources to see if new actors have to be created"""
for junction in self._active_junctions:
remove_entries = junction.scenario_info['remove_entries']
direction = junction.scenario_info['direction']
entry_lanes = [] if not direction else junction.entry_directions[direction]
actor_dict = junction.actor_dict
for source in junction.entry_sources:
if self.debug:
draw_point(self._world, source.wp.transform.location, 'small', 'junction', False)
draw_string(self._world, source.wp.transform.location, str(len(source.actors)), 'junction', False)
entry_lane_key = get_lane_key(source.entry_lane_wp)
at_oppo_entry_lane = entry_lane_key in junction.opposite_entry_keys
# The direction is prohibited as a junction scenario is active
if remove_entries and entry_lane_key in entry_lanes:
continue
self._add_incoming_actors(junction, source)
# Cap the amount of alive actors
if len(source.actors) >= self._junction_sources_max_actors:
continue
# Calculate distance to the last created actor
if len(source.actors) == 0:
distance = self._junction_spawn_dist + 1
else:
actor_location = CarlaDataProvider.get_location(source.actors[-1])
if not actor_location:
continue
distance = actor_location.distance(source.wp.transform.location)
# Spawn a new actor if the last one is far enough
if distance > self._junction_spawn_dist:
actor = self._spawn_source_actor(source)
if not actor:
continue
self._tm.distance_to_leading_vehicle(actor, self._junction_vehicle_dist)
self._add_actor_dict_element(actor_dict, actor, at_oppo_entry_lane=at_oppo_entry_lane)
source.actors.append(actor)
def _found_a_road_change(self, old_index, new_index, ignore_false_junctions=True):
"""Checks if the new route waypoint is part of a new road (excluding fake junctions)"""
if new_index == old_index:
return False
new_wp = self._route[new_index]
old_wp = self._route[old_index]
if get_road_key(new_wp) == get_road_key(old_wp):
return False
if ignore_false_junctions:
new_wp_junction = new_wp.get_junction()
if new_wp_junction and new_wp_junction.id in self._fake_junction_ids:
return False
return True
def _move_road_checker(self, prev_index, current_index):
"""
        Continually checks the road in front to see if it has changed its topology.
        If so, and the number of lanes has been reduced, removes the actors of the lanes that merge into others
"""
if self.debug:
checker_wp = self._route[self._road_checker_index]
draw_point(self._world, checker_wp.transform.location, 'small', 'road', False)
if prev_index == current_index:
return
# Get the new route tracking wp
checker_index = None
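        # The checker can advance at most up to the next junction entry (or the end of the route)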
last_index = self._junctions[0].route_entry_index if self._junctions else self._route_length - 1
current_accum_dist = self._accum_dist[current_index]
for i in range(self._road_checker_index, last_index):
accum_dist = self._accum_dist[i]
if accum_dist - current_accum_dist >= self._max_radius:
checker_index = i
break
if not checker_index:
checker_index = last_index
if self._found_a_road_change(self._road_checker_index, checker_index):
new_wps = get_same_dir_lanes(self._route[checker_index])
old_wps = get_same_dir_lanes(self._route[self._road_checker_index])
if len(new_wps) >= len(old_wps):
pass
else:
new_accum_dist = self._accum_dist[checker_index]
prev_accum_dist = self._accum_dist[self._road_checker_index]
route_move_dist = new_accum_dist - prev_accum_dist
unmapped_lane_keys = []
for old_wp in list(old_wps):
location = old_wp.transform.location
mapped_wp = None
for new_wp in new_wps:
if location.distance(new_wp.transform.location) < 1.1 * route_move_dist:
mapped_wp = new_wp
break
if not mapped_wp:
unmapped_lane_keys.append(get_lane_key(old_wp))
for actor in list(self._road_actors):
location = CarlaDataProvider.get_location(actor)
if not location:
continue
wp = self._map.get_waypoint(location)
if get_lane_key(wp) in unmapped_lane_keys:
self._destroy_actor(actor)
self._road_checker_index = checker_index
def _move_opposite_sources(self, prev_index, current_index):
"""
Moves the sources of the opposite direction back. Additionally, tracks a point a certain distance
in front of the ego to see if the road topology has to be recalculated
"""
if self.debug:
for source in self._opposite_sources:
draw_point(self._world, source.wp.transform.location, 'small', 'opposite', False)
draw_string(self._world, source.wp.transform.location, str(len(source.actors)), 'opposite', False)
route_wp = self._route[self._opposite_route_index]
draw_point(self._world, route_wp.transform.location, 'small', 'opposite', False)
if prev_index == current_index:
return
# Get the new route tracking wp
oppo_route_index = None
last_index = self._junctions[0].route_entry_index if self._junctions else self._route_length - 1
current_accum_dist = self._accum_dist[current_index]
for i in range(self._opposite_route_index, last_index):
accum_dist = self._accum_dist[i]
if accum_dist - current_accum_dist >= self._opposite_sources_dist:
oppo_route_index = i
break
if not oppo_route_index:
oppo_route_index = last_index
if self._found_a_road_change(self._opposite_route_index, oppo_route_index):
# Recheck the left lanes as the topology might have changed
new_opposite_sources = []
new_opposite_wps = get_opposite_dir_lanes(self._route[oppo_route_index])
            # Map the old sources to the new wps, and add new ones / remove unneeded ones
new_accum_dist = self._accum_dist[oppo_route_index]
prev_accum_dist = self._accum_dist[self._opposite_route_index]
route_move_dist = new_accum_dist - prev_accum_dist
for wp in new_opposite_wps:
location = wp.transform.location
new_source = None
for source in self._opposite_sources:
if location.distance(source.wp.transform.location) < 1.1 * route_move_dist:
new_source = source
break
if new_source:
new_source.wp = wp
new_opposite_sources.append(source)
self._opposite_sources.remove(source)
else:
new_opposite_sources.append(Source(wp, []))
self._opposite_sources = new_opposite_sources
else:
prev_accum_dist = self._accum_dist[prev_index]
current_accum_dist = self._accum_dist[current_index]
move_dist = current_accum_dist - prev_accum_dist
if move_dist <= 0:
return
for source in self._opposite_sources:
wp = source.wp
if not self._is_junction(wp):
prev_wps = wp.previous(move_dist)
if len(prev_wps) == 0:
continue
prev_wp = prev_wps[0]
source.wp = prev_wp
self._opposite_route_index = oppo_route_index
def _update_opposite_sources(self):
"""Checks the opposite actor sources to see if new actors have to be created"""
for source in self._opposite_sources:
# Cap the amount of alive actors
if len(source.actors) >= self._opposite_sources_max_actors:
continue
# Calculate distance to the last created actor
if len(source.actors) == 0:
distance = self._opposite_spawn_dist + 1
else:
actor_location = CarlaDataProvider.get_location(source.actors[-1])
if not actor_location:
continue
distance = source.wp.transform.location.distance(actor_location)
# Spawn a new actor if the last one is far enough
if distance > self._opposite_spawn_dist:
actor = self._spawn_source_actor(source)
if actor is None:
continue
self._tm.distance_to_leading_vehicle(actor, self._opposite_vehicle_dist)
self._opposite_actors.append(actor)
source.actors.append(actor)
def _update_parameters(self):
"""Changes the parameters depending on the blackboard variables and / or the speed of the ego"""
road_behavior_data = py_trees.blackboard.Blackboard().get("BA_RoadBehavior")
if road_behavior_data:
num_front_vehicles, num_back_vehicles, vehicle_dist, spawn_dist = road_behavior_data
if num_front_vehicles:
self._road_front_vehicles = num_front_vehicles
if num_back_vehicles:
self._road_back_vehicles = num_back_vehicles
if vehicle_dist:
self._road_vehicle_dist = vehicle_dist
if spawn_dist:
self._road_spawn_dist = spawn_dist
self._get_road_radius()
py_trees.blackboard.Blackboard().set("BA_RoadBehavior", None, True)
opposite_behavior_data = py_trees.blackboard.Blackboard().get("BA_OppositeBehavior")
if opposite_behavior_data:
            source_dist, vehicle_dist, spawn_dist, max_actors = opposite_behavior_data
if source_dist:
if source_dist < self._junction_sources_dist:
print("WARNING: Opposite sources distance is lower than the junction ones. Ignoring it")
else:
self._opposite_sources_dist = source_dist
if vehicle_dist:
self._opposite_vehicle_dist = vehicle_dist
if spawn_dist:
self._opposite_spawn_dist = spawn_dist
if max_actors:
self._opposite_sources_max_actors = max_actors
py_trees.blackboard.Blackboard().set("BA_OppositeBehavior", None, True)
junction_behavior_data = py_trees.blackboard.Blackboard().get("BA_JunctionBehavior")
if junction_behavior_data:
            source_dist, vehicle_dist, spawn_dist, max_actors = junction_behavior_data
if source_dist:
if source_dist > self._opposite_sources_dist:
print("WARNING: Junction sources distance is higher than the opposite ones. Ignoring it")
else:
self._junction_sources_dist = source_dist
if vehicle_dist:
self._junction_vehicle_dist = vehicle_dist
if spawn_dist:
self._junction_spawn_dist = spawn_dist
if max_actors:
self._junction_sources_max_actors = max_actors
py_trees.blackboard.Blackboard().set("BA_JunctionBehavior", None, True)
break_duration = py_trees.blackboard.Blackboard().get("BA_Scenario2")
if break_duration:
if self._is_scenario_2_active:
print("WARNING: A break scenario was requested but another one is already being triggered.")
else:
self._activate_break_scenario = True
self._break_duration = break_duration
py_trees.blackboard.Blackboard().set("BA_Scenario2", None, True)
crossing_dist = py_trees.blackboard.Blackboard().get("BA_Scenario4")
if crossing_dist:
self._is_scenario_4_active = True
self._crossing_dist = crossing_dist
py_trees.blackboard.Blackboard().set("BA_Scenario4", None, True)
direction = py_trees.blackboard.Blackboard().get("BA_Scenario7")
if direction:
self._initialise_junction_scenario(direction, True, True, True)
py_trees.blackboard.Blackboard().set("BA_Scenario7", None, True)
direction = py_trees.blackboard.Blackboard().get("BA_Scenario8")
if direction:
self._initialise_junction_scenario(direction, True, True, True)
py_trees.blackboard.Blackboard().set("BA_Scenario8", None, True)
direction = py_trees.blackboard.Blackboard().get("BA_Scenario9")
if direction:
self._initialise_junction_scenario(direction, True, False, True)
py_trees.blackboard.Blackboard().set("BA_Scenario9", None, True)
direction = py_trees.blackboard.Blackboard().get("BA_Scenario10")
if direction:
self._initialise_junction_scenario(direction, False, False, False)
py_trees.blackboard.Blackboard().set("BA_Scenario10", None, True)
self._compute_parameters()
def _compute_parameters(self):
"""Computes the parameters that are dependent on the speed of the ego. """
ego_speed = CarlaDataProvider.get_velocity(self._ego_actor)
# As the vehicles don't move if the agent doesn't, some agents might get blocked forever.
# Partially avoid this by adding an extra distance to the radius when the vehicle is stopped
# in the middle of the road and unaffected by any object such as traffic lights or stops.
if ego_speed == 0 \
and not self._is_scenario_2_active \
and not self._ego_actor.is_at_traffic_light() \
and len(self._active_junctions) <= 0:
self._extra_radius = min(self._extra_radius + self._extra_radius_increase_ratio, self._max_extra_radius)
        # In all cases, reduce it if the agent is moving
if ego_speed > 0 and self._extra_radius > 0:
self._extra_radius = max(self._extra_radius - self._extra_radius_increase_ratio, 0)
self._min_radius = self._base_min_radius + self._radius_increase_ratio * ego_speed + self._extra_radius
self._max_radius = self._base_max_radius + self._radius_increase_ratio * ego_speed + self._extra_radius
self._junction_detection_dist = self._max_radius
def _manage_break_scenario(self):
"""
Manages the break scenario, where all road vehicles in front of the ego suddenly stop,
wait for a bit, and start moving again. This will never trigger unless done so from outside.
"""
if self._is_scenario_2_active:
self._next_scenario_time -= self._world.get_snapshot().timestamp.delta_seconds
if self._next_scenario_time <= 0:
for actor in self._scenario_2_actors:
self._tm.vehicle_percentage_speed_difference(actor, 0)
lights = actor.get_light_state()
lights &= ~carla.VehicleLightState.Brake
actor.set_light_state(carla.VehicleLightState(lights))
self._scenario_2_actors = []
self._is_scenario_2_active = False
elif self._activate_break_scenario:
for actor in self._road_actors:
location = CarlaDataProvider.get_location(actor)
if location and not self._is_location_behind_ego(location):
self._scenario_2_actors.append(actor)
self._tm.vehicle_percentage_speed_difference(actor, 100)
lights = actor.get_light_state()
lights |= carla.VehicleLightState.Brake
actor.set_light_state(carla.VehicleLightState(lights))
self._is_scenario_2_active = True
self._activate_break_scenario = False
self._next_scenario_time = self._break_duration
#############################
## Actor functions ##
#############################
def _spawn_actors(self, spawn_wps):
"""Spawns several actors in batch"""
spawn_transforms = []
for wp in spawn_wps:
spawn_transforms.append(
carla.Transform(wp.transform.location + carla.Location(z=self._spawn_vertical_shift),
wp.transform.rotation)
)
actors = CarlaDataProvider.request_new_batch_actors(
'vehicle.*', len(spawn_transforms), spawn_transforms, True, False, 'background',
safe_blueprint=True, tick=False)
if not actors:
return actors
for actor in actors:
self._tm.auto_lane_change(actor, False)
if self._night_mode:
for actor in actors:
actor.set_light_state(carla.VehicleLightState(
carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam))
return actors
def _spawn_source_actor(self, source, ego_dist=0):
"""Given a source, spawns an actor at that source"""
ego_location = CarlaDataProvider.get_location(self._ego_actor)
source_transform = source.wp.transform
if ego_location.distance(source_transform.location) < ego_dist:
return None
new_transform = carla.Transform(
source_transform.location + carla.Location(z=self._spawn_vertical_shift),
source_transform.rotation
)
actor = CarlaDataProvider.request_new_actor(
'vehicle.*', new_transform, rolename='background',
autopilot=True, random_location=False, safe_blueprint=True, tick=False)
if not actor:
return actor
self._tm.auto_lane_change(actor, False)
if self._night_mode:
actor.set_light_state(carla.VehicleLightState(
carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam))
return actor
def _is_location_behind_ego(self, location):
"""Checks if an actor is behind the ego. Uses the route transform"""
ego_transform = self._route[self._route_index].transform
ego_heading = ego_transform.get_forward_vector()
ego_actor_vec = location - ego_transform.location
if ego_heading.x * ego_actor_vec.x + ego_heading.y * ego_actor_vec.y < - 0.17: # 100º
return True
return False
def _get_ego_route_lane_key(self, route_wp):
"""
Gets the route lane key of the ego. This corresponds to the same lane if the ego is driving normally,
        but if it is going in the opposite direction, the route's leftmost one is chosen instead
"""
location = CarlaDataProvider.get_location(self._ego_actor)
ego_true_wp = self._map.get_waypoint(location)
if get_road_key(ego_true_wp) != get_road_key(route_wp):
# Just return the default value as two different roads are being compared.
            # This might happen when moving to a new road and should be fixed next frame
return get_lane_key(route_wp)
yaw_diff = (ego_true_wp.transform.rotation.yaw - route_wp.transform.rotation.yaw) % 360
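        # A yaw difference below 90º (or above 270º) means the ego is driving in the same direction as the route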
if yaw_diff < 90 or yaw_diff > 270:
return get_lane_key(ego_true_wp)
else:
# Get the first lane of the opposite direction
leftmost_wp = route_wp
while True:
possible_left_wp = leftmost_wp.get_left_lane()
if possible_left_wp is None or possible_left_wp.lane_id * leftmost_wp.lane_id < 0:
break
leftmost_wp = possible_left_wp
return get_lane_key(leftmost_wp)
def _update_road_actors(self, prev_ego_index, current_ego_index):
"""
Dynamically controls the actor speed in front of the ego.
        Not applied to those behind it so that they can catch up to it
"""
route_wp = self._route[current_ego_index]
scenario_actors = self._scenario_4_actors + self._scenario_2_actors
for actor in self._road_actors:
location = CarlaDataProvider.get_location(actor)
if not location:
continue
if self.debug:
back_actor = False
for lane in self._road_back_actors:
if actor in self._road_back_actors[lane]:
back_actor = True
if back_actor:
draw_string(self._world, location, 'R(B)', 'road', False)
else:
draw_string(self._world, location, 'R(F)', 'road', False)
if actor in scenario_actors:
continue
if self._is_location_behind_ego(location):
continue
distance = location.distance(route_wp.transform.location)
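            # Linearly scale the speed reduction: 0% at the min radius, 100% at the max radius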
speed_red = (distance - self._min_radius) / (self._max_radius - self._min_radius) * 100
speed_red = np.clip(speed_red, 0, 100)
self._tm.vehicle_percentage_speed_difference(actor, speed_red)
# Check how the vehicles behind are
self._check_back_vehicles(prev_ego_index, current_ego_index)
def _check_back_vehicles(self, prev_route_index, current_route_index):
"""
Checks if any of the vehicles that should be behind the ego are in front, updating the road radius.
This is done by monitoring the closest lane key to the ego that is part of the route,
        and needs some remapping when the ego enters a new road
"""
route_wp = self._route[current_route_index]
prev_route_wp = self._route[prev_route_index]
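        # Distance threshold (with a 10% margin) used to map the lanes of the previous road onto the new one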
check_dist = 1.1 * route_wp.transform.location.distance(prev_route_wp.transform.location)
if prev_route_index != current_route_index:
road_change = self._found_a_road_change(prev_route_index, current_route_index, ignore_false_junctions=False)
if not self._is_junction(prev_route_wp) and road_change:
# Get all the wps of the new road
if not route_wp.is_junction:
new_wps = get_same_dir_lanes(route_wp)
else: # Entering a false junction
new_wps = []
for enter_wp, _ in route_wp.get_junction().get_waypoints(carla.LaneType.Driving):
if get_road_key(enter_wp) == get_road_key(route_wp):
new_wps.append(enter_wp)
# Get all the wps of the old road
if not prev_route_wp.is_junction:
old_wps = get_same_dir_lanes(prev_route_wp)
                else: # Exiting a false junction
old_wps = []
for _, exit_wp in prev_route_wp.get_junction().get_waypoints(carla.LaneType.Driving):
if get_road_key(exit_wp) == get_road_key(prev_route_wp):
old_wps.append(exit_wp)
# Map the new lanes to the old ones
mapped_keys = {}
unmapped_wps = new_wps
for old_wp in list(old_wps):
location = old_wp.transform.location
mapped_wp = None
for new_wp in unmapped_wps:
if location.distance(new_wp.transform.location) < check_dist:
mapped_wp = new_wp
break
if mapped_wp:
unmapped_wps.remove(mapped_wp)
mapped_keys[get_lane_key(old_wp)] = get_lane_key(mapped_wp)
# Remake the road back actors dictionary
new_road_back_actors = {}
for lane_key in self._road_back_actors:
if lane_key not in mapped_keys:
continue # A lane ended at that road
new_lane_key = mapped_keys[lane_key]
new_road_back_actors[new_lane_key] = self._road_back_actors[lane_key]
# For the active sources, change the mapped key to the new road keys
for source in self._road_sources:
if source.mapped_key in mapped_keys:
source.mapped_key = mapped_keys[source.mapped_key]
self._road_back_actors = new_road_back_actors
# New lanes, add new sources
for unmapped_wp in unmapped_wps:
source_wps = unmapped_wp.next(self._road_new_sources_dist)
if len(source_wps) != 1:
continue
new_source = Source(source_wps[0], [])
self._road_sources.append(new_source)
self._road_back_actors[new_source.mapped_key] = []
                if self._road_ego_key not in mapped_keys:
# Return the default. This might happen when the route lane ends and should be fixed next frame
self._road_ego_key = get_lane_key(route_wp)
else:
self._road_ego_key = mapped_keys[self._road_ego_key]
else:
self._road_ego_key = self._get_ego_route_lane_key(route_wp)
# Get the amount of vehicles in front of the ego
        if self._road_ego_key not in self._road_back_actors:
return
self._road_extra_front_actors = 0
for actor in self._road_back_actors[self._road_ego_key]:
if not self._is_location_behind_ego(actor.get_location()):
self._road_extra_front_actors += 1
self._get_road_radius()
self._compute_parameters()
def _update_junction_actors(self):
"""
        Handles each actor depending on its previous state. Actors entering the junction have their exit
monitored through their waypoint. When they exit, they are either moved to a connecting junction,
or added to the exit dictionary. Actors that exited the junction will stop after a certain distance
"""
if len(self._active_junctions) == 0:
return
max_index = len(self._active_junctions) - 1
for i, junction in enumerate(self._active_junctions):
if self.debug:
route_keys = junction.route_entry_keys + junction.route_exit_keys
route_oppo_keys = junction.opposite_entry_keys + junction.opposite_exit_keys
for wp in junction.entry_wps + junction.exit_wps:
if get_lane_key(wp) in route_keys:
draw_point(self._world, wp.transform.location, 'medium', 'road', False)
elif get_lane_key(wp) in route_oppo_keys:
draw_point(self._world, wp.transform.location, 'medium', 'opposite', False)
else:
draw_point(self._world, wp.transform.location, 'medium', 'junction', False)
actor_dict = junction.actor_dict
exit_dict = junction.exit_dict
remove_middle = junction.scenario_info['remove_middle']
for actor in list(actor_dict):
if actor not in actor_dict:
continue # Actor was removed during the loop
location = CarlaDataProvider.get_location(actor)
if not location:
continue
state, exit_lane_key, _ = actor_dict[actor].values()
if self.debug:
string = 'J' + str(i+1) + "_" + state[9:11]
draw_string(self._world, location, string, self._ego_state, False)
# Monitor its entry
if state == 'junction_entry':
actor_wp = self._map.get_waypoint(location)
if self._is_junction(actor_wp) and junction.contains(actor_wp.get_junction()):
if remove_middle:
self._destroy_actor(actor) # Don't clutter the junction if a junction scenario is active
continue
actor_dict[actor]['state'] = 'junction_middle'
# Monitor its exit and destroy an actor if needed
elif state == 'junction_middle':
actor_wp = self._map.get_waypoint(location)
actor_lane_key = get_lane_key(actor_wp)
if not self._is_junction(actor_wp) and actor_lane_key in exit_dict:
if i < max_index and actor_lane_key in junction.route_exit_keys:
# Exited through a connecting lane in the route direction.
self._remove_actor_info(actor)
other_junction = self._active_junctions[i+1]
self._add_actor_dict_element(other_junction.actor_dict, actor)
elif i > 0 and actor_lane_key in junction.opposite_exit_keys:
# Exited through a connecting lane in the opposite direction.
# THIS SHOULD NEVER HAPPEN, an entry source should have already added it.
other_junction = self._active_junctions[i-1]
if actor not in other_junction.actor_dict:
self._remove_actor_info(actor)
self._add_actor_dict_element(other_junction.actor_dict, actor, at_oppo_entry_lane=True)
else:
# Check the lane capacity
exit_dict[actor_lane_key]['ref_wp'] = actor_wp
actor_dict[actor]['state'] = 'junction_exit'
actor_dict[actor]['exit_lane_key'] = actor_lane_key
actors = exit_dict[actor_lane_key]['actors']
if len(actors) > 0 and len(actors) >= exit_dict[actor_lane_key]['max_actors']:
self._destroy_actor(actors[0]) # This is always the front most vehicle
actors.append(actor)
# Deactivate them when far from the junction
elif state == 'junction_exit':
distance = location.distance(exit_dict[exit_lane_key]['ref_wp'].transform.location)
if distance > exit_dict[exit_lane_key]['max_distance']:
self._tm.vehicle_percentage_speed_difference(actor, 100)
actor_dict[actor]['state'] = 'junction_inactive'
# Wait for something to happen
elif state == 'junction_inactive':
pass
def _update_opposite_actors(self, ref_transform):
"""
Updates the opposite actors. This involves tracking their position,
removing if too far behind the ego
"""
max_dist = max(self._opposite_removal_dist, self._opposite_spawn_dist)
for actor in list(self._opposite_actors):
location = CarlaDataProvider.get_location(actor)
if not location:
continue
if self.debug:
draw_string(self._world, location, 'O', 'opposite', False)
distance = location.distance(ref_transform.location)
if distance > max_dist and self._is_location_behind_ego(location):
self._destroy_actor(actor)
def _remove_actor_info(self, actor):
"""Removes all the references of the actor"""
if actor in self._road_actors:
self._road_actors.remove(actor)
if actor in self._opposite_actors:
self._opposite_actors.remove(actor)
if actor in self._scenario_2_actors:
self._scenario_2_actors.remove(actor)
if actor in self._scenario_4_actors:
self._scenario_4_actors.remove(actor)
for road_source in self._road_sources:
if actor in road_source.actors:
road_source.actors.remove(actor)
break
for lane in self._road_back_actors:
if actor in self._road_back_actors[lane]:
self._road_back_actors[lane].remove(actor)
break
for opposite_source in self._opposite_sources:
if actor in opposite_source.actors:
opposite_source.actors.remove(actor)
break
for junction in self._active_junctions:
junction.actor_dict.pop(actor, None)
for exit_source in junction.exit_sources:
if actor in exit_source.actors:
exit_source.actors.remove(actor)
break
for entry_source in junction.entry_sources:
if actor in entry_source.actors:
entry_source.actors.remove(actor)
break
for exit_keys in junction.exit_dict:
exit_actors = junction.exit_dict[exit_keys]['actors']
if actor in exit_actors:
exit_actors.remove(actor)
break
def _destroy_actor(self, actor):
"""Destroy the actor and all its references"""
self._remove_actor_info(actor)
try:
actor.destroy()
except RuntimeError:
pass
def _update_ego_route_location(self, location):
"""Returns the closest route location to the ego"""
for index in range(self._route_index, min(self._route_index + self._route_buffer, self._route_length)):
route_wp = self._route[index]
route_wp_dir = route_wp.transform.get_forward_vector() # Waypoint's forward vector
veh_wp_dir = location - route_wp.transform.location # vector waypoint - vehicle
dot_ve_wp = veh_wp_dir.x * route_wp_dir.x + veh_wp_dir.y * route_wp_dir.y + veh_wp_dir.z * route_wp_dir.z
if dot_ve_wp > 0:
self._route_index = index
return self._route[self._route_index]
| 44.741242 | 120 | 0.608784 | 91,784 | 0.958149 | 0 | 0 | 0 | 0 | 0 | 0 | 18,687 | 0.195077 |
6f3d6d699afb7966b9d1c11324477310b224dc24 | 502 | py | Python | Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 115 | 2015-03-23T13:34:42.000Z | 2022-03-21T00:27:21.000Z | Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 56 | 2015-02-25T15:04:26.000Z | 2022-01-03T07:42:48.000Z | Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 59 | 2015-11-26T11:44:51.000Z | 2022-03-21T00:27:22.000Z | #!/usr/bin/env python
from argparse import ArgumentParser
import sys
from comp_pi import compute_pi
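# compute_pi is implemented in Fortran and exposed to Python through an f2py-built extension module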
def main():
arg_parser = ArgumentParser(description='compute pi using Fortran '
'function')
arg_parser.add_argument('n', default=1000, nargs='?',
help='number of random points')
options = arg_parser.parse_args()
print(compute_pi(options.n))
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
| 25.1 | 71 | 0.62749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.197211 |
6f3d81cff53a00e04f111ddf20aa94a2c2b57bda | 3,885 | py | Python | test/lazy/test_cat_lazy_tensor.py | Mehdishishehbor/gpytorch | 432e537b3f6679ea4ab3acf33b14626b7e161c92 | [
"MIT"
]
| null | null | null | test/lazy/test_cat_lazy_tensor.py | Mehdishishehbor/gpytorch | 432e537b3f6679ea4ab3acf33b14626b7e161c92 | [
"MIT"
]
| null | null | null | test/lazy/test_cat_lazy_tensor.py | Mehdishishehbor/gpytorch | 432e537b3f6679ea4ab3acf33b14626b7e161c92 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.lazy import CatLazyTensor, NonLazyTensor
from gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCatLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:2, :].requires_grad_()
slice2_mat = self.psd_mat[2:4, :].requires_grad_()
slice3_mat = self.psd_mat[4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorColumn(LazyTensorTestCase, unittest.TestCase):
seed = 1
def create_lazy_tensor(self):
root = torch.randn(6, 7)
self.psd_mat = root.matmul(root.t())
slice1_mat = self.psd_mat[:, :2].requires_grad_()
slice2_mat = self.psd_mat[:, 2:4].requires_grad_()
slice3_mat = self.psd_mat[:, 4:6].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-1)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
def create_lazy_tensor(self):
root = torch.randn(3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorMultiBatch(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(4, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[..., :2, :].requires_grad_()
slice2_mat = self.psd_mat[..., 2:4, :].requires_grad_()
slice3_mat = self.psd_mat[..., 4:6, :].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=-2)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
class TestCatLazyTensorBatchCat(LazyTensorTestCase, unittest.TestCase):
seed = 0
    # Because these LTs are large, we'll skip the big tests
skip_slq_tests = True
def create_lazy_tensor(self):
root = torch.randn(5, 3, 6, 7)
self.psd_mat = root.matmul(root.transpose(-2, -1))
slice1_mat = self.psd_mat[:2, ...].requires_grad_()
slice2_mat = self.psd_mat[2:3, ...].requires_grad_()
slice3_mat = self.psd_mat[3:, ...].requires_grad_()
slice1 = NonLazyTensor(slice1_mat)
slice2 = NonLazyTensor(slice2_mat)
slice3 = NonLazyTensor(slice3_mat)
return CatLazyTensor(slice1, slice2, slice3, dim=0)
def evaluate_lazy_tensor(self, lazy_tensor):
return self.psd_mat.detach().clone().requires_grad_()
if __name__ == "__main__":
unittest.main()
| 31.844262 | 73 | 0.667954 | 3,642 | 0.937452 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.036551 |
6f3d9e5be4e02104620356819d1fd22753eef212 | 3,349 | py | Python | dbSchema.py | zikasak/ReadOnlyBot | 912403a5d6386c1ce691bbe22dad660af49b26e8 | [
"MIT"
]
| 1 | 2020-12-17T20:50:29.000Z | 2020-12-17T20:50:29.000Z | dbSchema.py | zikasak/ReadOnlyBot | 912403a5d6386c1ce691bbe22dad660af49b26e8 | [
"MIT"
]
| null | null | null | dbSchema.py | zikasak/ReadOnlyBot | 912403a5d6386c1ce691bbe22dad660af49b26e8 | [
"MIT"
]
| null | null | null | import datetime
from sqlalchemy import Column, Integer, Boolean, ForeignKey, String, DateTime, UniqueConstraint, ForeignKeyConstraint
from sqlalchemy.orm import relationship
from dbConfig import Base, engine
class GroupStatus(Base):
__tablename__ = "groupstatus"
id = Column(Integer, primary_key=True)
status = Column(Boolean, default=False)
wel_message = Column(String)
new_users_blocked = Column(Boolean, default=False)
time_to_mute = Column(Integer, default=30)
messages = relationship("GroupMessage", cascade="save-update, merge, delete, delete-orphan")
banned_users = relationship("BannedUser", cascade="save-update, merge, delete, delete-orphan")
mutted_users = relationship("MutedUser", backref="chat", cascade="save-update, merge, delete, delete-orphan")
blocked_phrases = relationship("BlockedPhrases", backref="chat", cascade="save-update, merge, delete, delete-orphan")
def add_muted(self, user_id, message_id):
m = MutedUser()
m.chat_id = self.id
m.user_id = user_id
m.welcome_msg_id = message_id
m.mute_date = datetime.datetime.utcnow()
if m not in self.mutted_users:
self.mutted_users.append(m)
class GroupMessage(Base):
__tablename__ = "groupmessage"
chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
message = Column(String)
command = Column(String, primary_key=True)
description = Column(String, default="")
UniqueConstraint('chat_id', 'command')
def __repr__(self):
return "{!r} - {!r}".format(self.command, self.description)
class MutedUser(Base):
__tablename__ = "muted"
chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
user_id = Column(Integer, primary_key=True)
    mute_date = Column(DateTime(timezone=True), nullable=False, default=datetime.datetime.utcnow)  # pass the callable itself so the default is evaluated at insert time, not once at import time
welcome_msg_id = Column(Integer, nullable=False)
time_messages = relationship("TimeExceededMessage", cascade="save-update, merge, delete, delete-orphan",
primaryjoin="and_(MutedUser.chat_id==TimeExceededMessage.chat_id, "
"MutedUser.user_id==TimeExceededMessage.user_id)")
def __eq__(self, obj: object) -> bool:
if type(obj) != MutedUser:
return super().__eq__(obj)
return (self.chat_id == obj.chat_id) and (self.user_id == obj.user_id)
class TimeExceededMessage(Base):
__tablename__ = "mutedMessages"
id = Column(Integer, primary_key=True)
chat_id = Column(Integer)
user_id = Column(Integer)
welcome_msg_id = Column(Integer, ForeignKey("muted.welcome_msg_id"))
msg_id = Column(Integer, nullable=False)
__table_args__ = (ForeignKeyConstraint([chat_id, user_id], [MutedUser.chat_id, MutedUser.user_id]), {})
class BannedUser(Base):
__tablename__ = "bannedusers"
chat_id = Column(Integer, ForeignKey("groupstatus.id"), primary_key=True)
user_id = Column(Integer, primary_key=True)
username = Column(String)
reason = Column(String)
class BlockedPhrases(Base):
__tablename__ = "blockedPhrases"
id = Column(Integer, primary_key=True)
chat_id = Column(Integer, ForeignKey("groupstatus.id"))
blockedPhrase = Column(String, nullable=False)
Base.metadata.create_all(engine)
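# --- Illustrative usage sketch added by the editor (not part of the original file).
# It assumes the `engine` imported from dbConfig points at a reachable database; the
# chat/user ids below are made-up example values.
if __name__ == "__main__":
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=engine)
    session = Session()
    chat = GroupStatus(id=1, status=True, wel_message="Welcome!")
    chat.add_muted(user_id=42, message_id=1001)  # queues a MutedUser row tied to this chat
    session.add(chat)
    session.commit()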
| 40.349398 | 121 | 0.701702 | 3,088 | 0.922066 | 0 | 0 | 0 | 0 | 0 | 0 | 602 | 0.179755 |
6f3e4697377cf878d0a79c14a88b2faa221afbab | 2,224 | py | Python | dqn/dqn_noisy_networks/model.py | AgentMaker/Paddle-RLBooks | 2e879f7ec3befa2058f0181e205b790d47770a85 | [
"Apache-2.0"
]
| 127 | 2021-03-22T07:34:43.000Z | 2022-02-04T13:33:15.000Z | dqn/dqn_noisy_networks/model.py | WhiteFireFox/Paddle-RLBooks | 1a6add1d01b1bab08bb9d246fcd6ab852a43c18c | [
"Apache-2.0"
]
| 1 | 2021-05-16T09:51:07.000Z | 2021-05-16T09:51:07.000Z | dqn/dqn_noisy_networks/model.py | WhiteFireFox/Paddle-RLBooks | 1a6add1d01b1bab08bb9d246fcd6ab852a43c18c | [
"Apache-2.0"
]
 | 16 | 2021-04-03T05:31:30.000Z | 2022-03-26T07:53:49.000Z |
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Assign
import math
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features)
sigma_init = sigma_zero / math.sqrt(in_features)
sigma_weight = self.create_parameter(
shape=[in_features, out_features],
default_initializer=Assign(
paddle.full((in_features, out_features), sigma_init)
)
)
self.add_parameter("sigma_weight", sigma_weight)
self.register_buffer("epsilon_input", paddle.zeros((1, in_features)))
self.register_buffer("epsilon_output", paddle.zeros((out_features, 1)))
if bias:
sigma_bias = self.create_parameter(
shape=[out_features],
default_initializer=Assign(
paddle.full([out_features], sigma_init)
)
)
self.add_parameter("sigma_bias", sigma_bias)
def _scale_noise(self, shape):
x = paddle.randn(shape)
return x.sign().multiply(x.abs().sqrt())
def forward(self, inputs):
with paddle.no_grad():
eps_in = self._scale_noise(self.epsilon_input.shape)
eps_out = self._scale_noise(self.epsilon_output.shape)
noise_v = paddle.multiply(eps_in, eps_out).detach()
return F.linear(inputs, self.weight + self.sigma_weight * noise_v.t(), self.bias + self.sigma_bias * eps_out.squeeze().t())
class Model(nn.Layer):
def __init__(self, num_inputs, num_actions):
super(Model, self).__init__()
self.conv1 = nn.Conv2D(num_inputs, 32, 3, stride=3)
self.conv2 = nn.Conv2D(32, 32, 3, stride=3)
self.conv3 = nn.Conv2D(32, 64, 3, stride=1)
self.flatten = nn.Flatten()
self.linear = NoisyLinear(64 * 3 * 2, 256)
self.fc = NoisyLinear(256, num_actions)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.flatten(x)
x = self.linear(x)
return self.fc(x)
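# --- Illustrative usage sketch added by the editor (not part of the original file).
# The observation shape below (4 channels, 45x36) is an assumption chosen so that the
# flattened convolution output matches NoisyLinear(64 * 3 * 2, 256); real inputs may differ.
if __name__ == "__main__":
    model = Model(num_inputs=4, num_actions=6)
    fake_obs = paddle.randn([1, 4, 45, 36])  # hypothetical batch of one observation
    q_values = model(fake_obs)
    print(q_values.shape)  # expected: [1, 6]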
| 38.344828 | 131 | 0.619155 | 2,096 | 0.942446 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.025629 |
6f3fea7c8f1bfc40279f3c4ea0ed4489009162a1 | 50 | py | Python | players/__init__.py | lejbron/arkenstone | d5341c27ba81eaf116e5ee5983b4fa422437d294 | [
"MIT"
]
| null | null | null | players/__init__.py | lejbron/arkenstone | d5341c27ba81eaf116e5ee5983b4fa422437d294 | [
"MIT"
]
| 4 | 2021-03-17T19:46:35.000Z | 2021-04-09T11:37:53.000Z | players/__init__.py | lejbron/arkenstone | d5341c27ba81eaf116e5ee5983b4fa422437d294 | [
"MIT"
]
 | 1 | 2021-04-11T07:50:56.000Z | 2021-04-11T07:50:56.000Z |
default_app_config = 'players.apps.PlayersConfig'
| 25 | 49 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.56 |
6f405d7dc1023a5440b606895121fbd0e2262df7 | 1,631 | py | Python | forte/utils/utils_io.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
]
| 2 | 2021-01-01T12:07:27.000Z | 2021-09-10T03:57:18.000Z | forte/utils/utils_io.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
]
| null | null | null | forte/utils/utils_io.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
]
 | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to input/output.
"""
import os
__all__ = [
"maybe_create_dir",
"ensure_dir",
"get_resource"
]
import sys
def maybe_create_dir(dirname: str) -> bool:
r"""Creates directory if it does not exist.
Args:
dirname (str): Path to the directory.
Returns:
bool: Whether a new directory is created.
"""
if not os.path.isdir(dirname):
os.makedirs(dirname)
return True
return False
def ensure_dir(filename: str):
    """Create the parent directory of `filename` if it does not already exist.
    Args:
        filename: Path whose parent directory should exist before writing.
    Returns:
        None
    """
d = os.path.dirname(filename)
if d:
maybe_create_dir(d)
def get_resource(path_name, is_file=True):
for dirname in sys.path:
candidate = os.path.join(dirname, path_name)
if is_file:
if os.path.isfile(candidate):
return candidate
else:
if os.path.exists(candidate):
return candidate
raise FileNotFoundError("Can't find file %s in python path." % path_name)
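# --- Illustrative usage sketch added by the editor (not part of the original file).
# The paths below are hypothetical; they only show how the helpers are meant to be called.
if __name__ == "__main__":
    created = maybe_create_dir("/tmp/forte_demo")   # True only if the directory was missing
    ensure_dir("/tmp/forte_demo/logs/run.txt")      # makes sure /tmp/forte_demo/logs exists
    try:
        print(created, get_resource("os.py"))       # searches every sys.path entry for the file
    except FileNotFoundError as err:
        print(created, err)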
| 24.343284 | 77 | 0.660331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 948 | 0.581239 |
6f4276bb292fddfa79fdb894416964ab4cf57b3a | 4,834 | py | Python | src/AFN.py | mbampi/LinguagensRegulares | 1fc7fbcc21053577bbeb1f71e742aee3a48f2188 | [
"MIT"
]
| null | null | null | src/AFN.py | mbampi/LinguagensRegulares | 1fc7fbcc21053577bbeb1f71e742aee3a48f2188 | [
"MIT"
]
| null | null | null | src/AFN.py | mbampi/LinguagensRegulares | 1fc7fbcc21053577bbeb1f71e742aee3a48f2188 | [
"MIT"
]
| null | null | null |
import re
from AFD import AFD
class AFN:
def __init__(self, nome=None, estados=[], simbolos=[], estado_inicial=None, estados_finais=[], funcoes_programa={}):
self.nome = nome
self.estados = estados
self.simbolos = simbolos
self.estado_inicial = estado_inicial
self.estados_finais = estados_finais
self.funcoes_programa = funcoes_programa
def __str__(self):
output = f'\nnome={self.nome}'
output += f'\nestados={self.estados}'
output += f'\nsimbolos={self.simbolos}'
output += f'\nestado_inicial={self.estado_inicial}'
output += f'\nestados_finais={self.estados_finais}'
output += f'\nfuncoes_programa='
output += str([str(fp) + ' -> ' + str(e) for fp,
e in self.funcoes_programa.items()])
return output
@staticmethod
def afn_de_arquivo(caminho_arquivo):
        ''' Reads the file at the given path and
        returns a nondeterministic finite automaton (AFN)'''
with open(caminho_arquivo) as file:
first_line = file.readline().split("=", 1)
nome = first_line[0]
            # strip the parentheses
str_definicao = first_line[1][1:-1]
            # replace '{}' with '()'
str_definicao = str_definicao.replace(
'{', '(').replace('}', ')')
            # regex to find elements separated by ',' or sets of elements inside '{}'
regex_exp = "[^,()]*(?:\([^)]*\))*[^,]*"
definicao = re.findall(regex_exp, str_definicao)
            # remove the '()' and strip whitespace
definicao = [i.strip().replace('(', '').replace(')', '')
for i in definicao if i]
            # split the string on ','
definicao = [i.split(',') for i in definicao]
estados = definicao[0]
simbolos = definicao[1]
estado_inicial = definicao[2][0]
estados_finais = definicao[3]
            # discard the 'Prog' line
file.readline()
funcoes_programa = {}
for line in file.readlines():
estado = re.search('^\((.*),', line)[0][1: -1]
simbolo = re.search(',(.*)\)=', line)[0][1: -2]
estado_resultante = re.search('=(.*)$', line)[0][1:]
if funcoes_programa.get((estado, simbolo)):
funcoes_programa[(estado, simbolo)].append(
estado_resultante)
else:
funcoes_programa[(estado, simbolo)] = [estado_resultante]
return AFN(nome, estados, simbolos, estado_inicial, estados_finais, funcoes_programa)
@staticmethod
def _saidas_novo_estado(estado, simbolo, funcoes_programa):
estados = estado.split('+')
saidas = []
for e in estados:
estado_resultante = funcoes_programa.get((e, simbolo))
if estado_resultante:
saidas.extend(estado_resultante)
if saidas == []:
return 'QM'
return '+'.join(sorted(list(set(saidas))))
@staticmethod
def _define_estados_finais(estados, estados_finais):
finais = []
for estado in estados:
for ef in estados_finais:
if ef in estado:
finais.append(estado)
return finais
def para_AFD(self):
q = []
t = {}
q.append(self.estado_inicial)
estado_morto = 'QM'
for simbolo in self.simbolos:
estado_resultante = self.funcoes_programa.get(
(self.estado_inicial, simbolo))
if estado_resultante:
t[(self.estado_inicial, simbolo)] = '+'.join(estado_resultante)
else:
t[(self.estado_inicial, simbolo)] = estado_morto
while(set(q) != set(t.values())):
for er in list(t.values()):
if er not in q:
q.append(er)
for simbolo in self.simbolos:
if '+' in er:
t[(er, simbolo)] = AFN._saidas_novo_estado(
er, simbolo, self.funcoes_programa)
else:
estado_resultante = self.funcoes_programa.get(
(er, simbolo))
if estado_resultante:
t[(er, simbolo)] = '+'.join(estado_resultante)
else:
t[(er, simbolo)] = estado_morto
estados_finais = AFN._define_estados_finais(q, self.estados_finais)
return AFD(nome=self.nome, estados=q, simbolos=self.simbolos, estado_inicial=self.estado_inicial, estados_finais=estados_finais, funcoes_programa=t)
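# --- Illustrative usage sketch added by the editor (not part of the original file).
# "exemplos/afn1.txt" is a hypothetical definition file in the format parsed by afn_de_arquivo.
if __name__ == "__main__":
    afn = AFN.afn_de_arquivo("exemplos/afn1.txt")
    print(afn)
    afd = afn.para_AFD()   # determinize the automaton via the subset construction above
    print(afd)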
| 37.184615 | 156 | 0.528962 | 4,800 | 0.992966 | 0 | 0 | 2,510 | 0.519239 | 0 | 0 | 599 | 0.123914 |
6f428bd943ae35a4fd79dc7877617c8e0b05143f | 11,348 | py | Python | cs_tools/tools/_searchable-dependencies/app.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
]
| 1 | 2022-03-14T19:04:53.000Z | 2022-03-14T19:04:53.000Z | cs_tools/tools/_searchable-dependencies/app.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
]
| 10 | 2021-06-01T14:34:52.000Z | 2022-03-24T00:47:47.000Z | cs_tools/tools/_searchable-dependencies/app.py | thoughtspot/cs_tools | 7b516476be94adf7f121645b7c3fc7206fdae4ca | [
"MIT"
]
 | null | null | null |
from typing import List, Dict
import pathlib
import shutil
import enum
from typer import Option as O_
import typer
from cs_tools.helpers.cli_ux import console, frontend, CSToolsGroup, CSToolsCommand
from cs_tools.util.datetime import to_datetime
from cs_tools.tools.common import run_tql_command, run_tql_script, tsload
from cs_tools.util.algo import chunks
from cs_tools.settings import TSConfig
from cs_tools.const import FMT_TSLOAD_DATETIME
from cs_tools.thoughtspot import ThoughtSpot
from cs_tools.tools import common
from .util import FileQueue
HERE = pathlib.Path(__file__).parent
class SystemType(str, enum.Enum):
"""
Reversible mapping of system to friendly names.
"""
ONE_TO_ONE_LOGICAL = 'system table'
USER_DEFINED = 'imported data'
WORKSHEET = 'worksheet'
AGGR_WORKSHEET = 'view'
PINBOARD_ANSWER_BOOK = 'pinboard'
QUESTION_ANSWER_BOOK = 'saved answer'
MATERIALIZED_VIEW = 'materialized view'
CALENDAR_TABLE = 'custom calendar'
FORMULA = 'formula'
@classmethod
def to_friendly(cls, value) -> str:
return getattr(cls, value).value
@classmethod
def to_system(cls, value) -> str:
return getattr(cls, value).name
class ParentType(str, enum.Enum):
"""
Limits the type of objects passed on via CLI.
"""
SYSTEM_TABLE = 'system table'
IMPORTED_DATA = 'imported data'
WORKSHEET = 'worksheet'
VIEW = 'view'
def _format_metadata_objects(queue, metadata: List[Dict]):
"""
Standardize data in an expected format.
This is a simple transformation layer, we are fitting our data to be
record-based and in the format that's expected for an eventual
tsload command.
"""
for parent in metadata:
queue.put({
'guid_': parent['id'],
'name': parent['name'],
'description': parent.get('description'),
'author_guid': parent['author'],
'author_name': parent['authorName'],
'author_display_name': parent['authorDisplayName'],
'created': to_datetime(parent['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(parent['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': parent['modifiedBy'] # user.guid
'type': SystemType.to_friendly(parent['type']) if parent.get('type') else 'column',
'context': parent.get('owner')
})
def _format_dependency(queue, parent_guid, dependencies: Dict[str, Dict]):
"""
Standardize data in an expected format.
This is a simple transformation layer, we are fitting our data to be
record-based and in the format that's expected for an eventual
tsload command.
"""
for dependency in dependencies:
queue.put({
'guid_': dependency['id'],
'parent_guid': parent_guid,
'name': dependency['name'],
'description': dependency.get('description'),
'author_guid': dependency['author'],
'author_name': dependency['authorName'],
'author_display_name': dependency['authorDisplayName'],
'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': dependency['modifiedBy'] # user.guid
'type': SystemType.to_friendly(dependency['type'])
})
def _get_dependents(api: ThoughtSpot, queue, parent: str, metadata: List[Dict]):
for chunk in chunks(metadata, n=50):
r = api._dependency.list_dependents(
id=[_['id'] for _ in chunk],
type='LOGICAL_COLUMN' if parent in ('formula', 'column') else 'LOGICAL_TABLE',
batchsize=-1,
timeout=None if parent == 'column' else -1
)
for parent_guid, dependent_data in r.json().items():
for dependency_type, dependencies in dependent_data.items():
for dependency in dependencies:
dependency['type'] = dependency.get('type', dependency_type)
queue.put({
'guid_': dependency['id'],
'parent_guid': parent_guid,
'name': dependency['name'],
'description': dependency.get('description'),
'author_guid': dependency['author'],
'author_name': dependency['authorName'],
'author_display_name': dependency['authorDisplayName'],
'created': to_datetime(dependency['created'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
'modified': to_datetime(dependency['modified'], unit='ms').strftime(FMT_TSLOAD_DATETIME),
# 'modified_by': dependency['modifiedBy'] # user.guid
'type': SystemType.to_friendly(dependency['type'])
})
def _get_recordset_metadata(api: ThoughtSpot) -> Dict[str, List]:
_seen = {}
metadata = {
'system table': [],
'imported data': [],
'worksheet': [],
'view': [],
'formula': [],
'column': [],
'other': []
}
active_users = common.batched(
api._metadata.list,
type='USER',
batchsize=5000,
transformer=lambda r: r.json()['headers']
)
r = [
*common.batched(
api._metadata.list,
type='LOGICAL_TABLE',
batchsize=5000,
transformer=lambda r: r.json()['headers']
),
*common.batched(
api._metadata.list,
type='LOGICAL_COLUMN',
batchsize=5000,
# NOTE: "True" = includes Custom Calendars & Materialized Views...
# auto_created=False,
transformer=lambda r: r.json()['headers']
)
]
for item in r:
try:
friendly = SystemType.to_friendly(item['type'])
except KeyError:
friendly = 'column'
except AttributeError:
friendly = 'other'
author = next((u for u in active_users if u['id'] == item['author']), None) or {}
parent = _seen.get(item['owner']) or {}
item = {
**item,
'friendly': friendly,
'owner': parent.get('name'),
'authorName': author.get('name') or item.get('authorName'),
'authorDisplayName': author.get('displayName') or item.get('authorDisplayName'),
}
_seen[item['id']] = item
metadata[friendly].append(item)
return metadata
app = typer.Typer(
help="""
Make Dependencies searchable in your platform.
[b][yellow]USE AT YOUR OWN RISK![/b] This tool uses private API calls which
could change on any version update and break the tool.[/]
Dependencies can be collected for various types of metadata. For example,
many tables are used within a worksheet, while many worksheets will have
answers and pinboards built on top of them.
\b
Metadata Object Metadata Dependent
- guid - guid
- name - parent guid
- description - name
- author guid - description
- author name - author guid
- author display name - author name
- created - author display name
- modified - created
- object type - modified
- context - object type
\f
Also available, but not developed for..
Tag / Stickers -> TAG
Embrace Connections -> DATA_SOURCE
""",
cls=CSToolsGroup,
options_metavar='[--version, --help]'
)
@app.command(cls=CSToolsCommand)
@frontend
def spotapp(
export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True),
    # maintained for backwards compatibility
backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True),
**frontend_kw
):
"""
Exports the SpotApp associated with this tool.
"""
shutil.copy(HERE / 'static' / 'spotapps.zip', export)
console.print(f'moved the SpotApp to {export}')
@app.command(cls=CSToolsCommand)
@frontend
def gather(
export: pathlib.Path = O_(None, help='directory to save the spot app to', file_okay=False, resolve_path=True),
parent: ParentType=O_(None, help='type of object to find dependents for'),
include_columns: bool=O_(False, '--include-columns', help='whether or not to find column dependents', show_default=False),
    # maintained for backwards compatibility
backwards_compat: pathlib.Path = O_(None, '--save_path', help='backwards-compat if specified, directory to save data to', hidden=True),
**frontend_kw
):
"""
Gather and optionally, insert data into Falcon.
By default, data is automatically gathered and inserted into the
platform. If --export argument is used, data will not be inserted
and will instead be dumped to the location specified.
"""
cfg = TSConfig.from_cli_args(**frontend_kw, interactive=True)
export = export or backwards_compat
dir_ = cfg.temp_dir if export is None else export
dir_.parent.mkdir(exist_ok=True)
static = HERE / 'static'
parent_types = [e.value for e in ParentType] if parent is None else [parent]
if include_columns:
parent_types.extend(['formula', 'column'])
with ThoughtSpot(cfg) as ts:
with console.status('getting top level metadata'):
metadata = _get_recordset_metadata(ts.api)
parent_q = FileQueue(dir_ / 'introspect_metadata_object.csv')
children_q = FileQueue(dir_ / 'introspect_metadata_dependent.csv')
with parent_q as pq, children_q as cq:
for parent in parent_types:
with console.status(f'getting dependents of metadata: {parent}'):
_format_metadata_objects(pq, metadata[parent])
_get_dependents(ts.api, cq, parent, metadata[parent])
if export is not None:
return
try:
with console.status('creating tables with remote TQL'):
run_tql_command(ts, command='CREATE DATABASE cs_tools;')
run_tql_script(ts, fp=static / 'create_tables.tql', raise_errors=True)
except common.TableAlreadyExists:
with console.status('altering tables with remote TQL'):
run_tql_script(ts, fp=static / 'alter_tables.tql')
with console.status('loading data to Falcon with remote tsload'):
for stem in ('introspect_metadata_object', 'introspect_metadata_dependent'):
path = dir_ / f'{stem}.csv'
cycle_id = tsload(
ts,
fp=path,
target_database='cs_tools',
target_table=stem,
has_header_row=True
)
path.unlink()
r = ts.api.ts_dataservice.load_status(cycle_id).json()
m = ts.api.ts_dataservice._parse_tsload_status(r)
console.print(m)
| 36.371795 | 139 | 0.607684 | 829 | 0.073053 | 0 | 0 | 3,684 | 0.324639 | 0 | 0 | 4,197 | 0.369845 |
6f454cefd9a2976b1fecad345694dd6dc38f8205 | 6,098 | py | Python | bots/philBots.py | phyxl/GameOfPureStrategy | 95ec7b1cb0c85dbdd4da315dac02d12d5d9c1a6a | [
"MIT"
]
| null | null | null | bots/philBots.py | phyxl/GameOfPureStrategy | 95ec7b1cb0c85dbdd4da315dac02d12d5d9c1a6a | [
"MIT"
]
| null | null | null | bots/philBots.py | phyxl/GameOfPureStrategy | 95ec7b1cb0c85dbdd4da315dac02d12d5d9c1a6a | [
"MIT"
]
 | null | null | null |
#!/usr/bin/python
import math
import random
from utils.log import log
from bots.simpleBots import BasicBot
def get_Chosen(num_cards, desired_score):
chosen = list(range(1,num_cards+1))
last_removed = 0
while sum(chosen) > desired_score:
#remove a random element
last_removed = random.randint(0,len(chosen)-1)
add_back = chosen[last_removed]
chosen.remove(add_back)
chosen.append(add_back)
    chosen.sort()  # actually call sort(); the bare attribute access had no effect
return chosen
class shiftBot(BasicBot):
def __init__(self, player_num, num_players, num_cards, num_games):
#this bot is pretty dumb, and just plays bottom up
self.shift_hand = list(range(1, num_cards+1))
self.num_cards = num_cards
self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing
self.num_players = num_players
self.start_index = 1
def end_game(self, result):
#increment index
self.start_index += 1
if(self.start_index >= self.num_cards):
self.start_index = 0
def take_turn(self, game_state, verbose = False):
num_cards_remaining = len(game_state.current_prizes)
index = (self.start_index + self.num_cards - num_cards_remaining) % self.num_cards
return self.shift_hand[index]
class PhillipAdaptoBot(BasicBot):
def __init__(self, player_num, num_players, num_cards, num_games):
#Bot is initialized once at the beginning of the competition, and persists between games.
self.player_num = player_num #I can use this to cheat I think by asking the other bots what they are planning on playing
self.num_players = num_players #normally 2, but ideally, you should allow your bot to gracefully handle more
self.num_cards = num_cards
self.num_games = 50
self.current_record = 0
self.game_count = 0
self.state = 0 #I'll use this to cycle through strategies attempting to hard counter my opponent
self.implemented_strategies = 8 #can only cycle through strategies that I know
self.wobble = 0 #some secret sauce
self.staying_power = 2
self.desired_score = math.ceil((num_cards + 1) * num_cards / 4)
self.chosen = get_Chosen(self.num_cards, self.desired_score)
return
def end_game(self, result):
#Called by GameArena upon game end. Result is the number of the winning bot previous game, -1 if tie
#Likely want to reset any tracking variables that persist between rounds here.
self.game_count += 1
self.chosen = get_Chosen(self.num_cards, self.desired_score)
if result != self.player_num or self.wobble == 1:
            # I think that means I lost, and am not hard countering
self.state += 1
if self.state >= self.implemented_strategies:
self.state = 0 #You're probably sunk at this point
#if self.current_record > self.staying_power:
#self.wobble = 1
self.current_record = 0
else:
self.current_record += 1 # a little ugly, but who cares
#this means I won, and should not change strategy
#want to detect a winning streak
return
def take_turn(self, game_state, verbose = False):
        # a completed bot should wrap all log statements in verbosity checks, so we don't get a flooded console if running 1000 iterations
        if verbose:
            log(self, "This is a verbose print statement!")
        # the goal is to beat the opponent by one when possible (most efficient)
num_cards_remaining = len(game_state.current_prizes)
my_score = game_state.current_scores[self.player_num]
my_current_hand = game_state.current_hands[self.player_num]
if self.state == 0:#default case should be obvious bot
play = game_state.prize_this_round
elif self.state == 1: #bidding fairly didn't win the first round, could be playing a random bot or literally anything...
if len(my_current_hand) > 1:
play = self.num_cards - len(my_current_hand) + 2
else:
play = min(my_current_hand)
elif self.state == 2:
play = max(my_current_hand)
elif self.state == 3:
if game_state.prize_this_round < self.num_cards:
play = game_state.prize_this_round + 1
else:
play = 1
elif self.state == 4:
if game_state.prize_this_round < self.num_cards - 1:
play = game_state.prize_this_round + 2
else:
play = min(my_current_hand)
elif self.state == 5:
if game_state.prize_this_round > self.num_cards:
play = game_state.prize_this_round - 1
else:
play = max(my_current_hand)
elif self.state == 6:
if game_state.prize_this_round > self.num_cards + 1:
play = game_state.prize_this_round - 2
else:
play = max(my_current_hand)
elif self.state == 7:
if game_state.prize_this_round in self.chosen:
play = my_current_hand[-(len(self.chosen) - self.chosen.index(game_state.prize_this_round)):][0]
#play = max(my_current_hand)
self.chosen.remove(game_state.prize_this_round)
else:
play = min(my_current_hand)
return play # return a card to play
class PhillipBotUpBot(BasicBot):
def take_turn(self, game, verbose = False):
"""
Called by GameArena when it's time to take your turn. You are passed a "game" object with this info to work with:
card = (int) value 1 thru num_cards
variables available to your bot:
self.player_num = your player number
self.num_players = normally 2, but ideally, you should allow your bot to gracefully handle more
self.num_cards = normally 13, but ideally, you should allow your bot to gracefully handle any amount
game_state.current_won_cards[player_num][cards] = list of cards each player has won so far
game_state.current_scores[player_num] = current score of each each player
game_state.current_hands[player][cards] = list of cards currently in each player's hand
game_state.current_prizes[cards] = list of prizes remaining
game_state.prize_this_round (int) = current prize showing for this round
"""
num_cards_remaining = len(game.current_prizes)
my_score = game.current_scores[self.player_num]
my_current_hand = game.current_hands[self.player_num]
if (my_score > 0) or (game.prize_this_round == 12):
play = max(my_current_hand)
else:
play = min(my_current_hand) #base strategy, need to add tweaks later
return play
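# --- Illustrative usage sketch added by the editor (not part of the original file).
# 13 and 45 are arbitrary example values, not constants taken from the game rules.
if __name__ == "__main__":
    picked = get_Chosen(13, 45)
    print(picked, sum(picked))  # a subset of 1..13 trimmed down toward the 45 target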
| 38.594937 | 130 | 0.735323 | 5,659 | 0.928009 | 0 | 0 | 0 | 0 | 0 | 0 | 2,389 | 0.391768 |
6f459b6385eeaec430778e2b8c2a198dc774b06f | 1,280 | py | Python | tests/ws/TestWebsocketRegisterAgent.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
]
| null | null | null | tests/ws/TestWebsocketRegisterAgent.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
]
| 1 | 2020-11-20T03:10:23.000Z | 2020-11-20T09:30:34.000Z | tests/ws/TestWebsocketRegisterAgent.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
]
 | 1 | 2021-10-13T10:16:58.000Z | 2021-10-13T10:16:58.000Z |
import uuid
from typing import Dict, List
from nehushtan.ws.NehushtanWebsocketConnectionEntity import NehushtanWebsocketConnectionEntity
class TestWebsocketRegisterAgent:
def __init__(self):
self.__map: Dict[str, NehushtanWebsocketConnectionEntity] = {}
self.agent_identity = str(uuid.uuid4())
def register(self, websocket):
entity = NehushtanWebsocketConnectionEntity(websocket)
self.__map[entity.get_key()] = entity
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] registered [{entity.get_key()}]")
return entity
def unregister(self, key: str):
if self.__map.get(key):
del self.__map[key]
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] unregistered [{key}]")
def read(self, key: str):
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] reading [{key}]")
return self.__map.get(key)
def list_for_server(self, local_key: str) -> List[NehushtanWebsocketConnectionEntity]:
print(f"TestWebsocketRegisterAgent[{self.agent_identity}] listing for [{local_key}]")
        entities = []
        for k, v in self.__map.items():
            if v.get_local_key() == local_key:
                entities.append(v)
        return entities
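# --- Illustrative call sequence added by the editor (not part of the original file).
# `websocket` stands for whatever connection object NehushtanWebsocketConnectionEntity
# expects; it is a placeholder, so this helper is only a sketch of the intended flow.
def _example_usage(websocket):
    agent = TestWebsocketRegisterAgent()
    entity = agent.register(websocket)
    found = agent.read(entity.get_key())
    agent.unregister(entity.get_key())
    return found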
| 36.571429 | 99 | 0.682813 | 1,139 | 0.889844 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.236719 |
6f47b4b418f600c91349bca3f946db81bd280d01 | 470 | py | Python | decorator_pattern/starbuzz/condiment.py | garyeechung/design-pattern-practice | 00ca66b79773de06c2d043c33caf37cb5f40a507 | [
"MIT"
]
| 2 | 2021-02-25T06:04:34.000Z | 2021-02-25T06:13:48.000Z | decorator_pattern/starbuzz/condiment.py | garyeechung/design-pattern-practice | 00ca66b79773de06c2d043c33caf37cb5f40a507 | [
"MIT"
]
| 1 | 2021-02-17T16:45:58.000Z | 2021-02-23T12:54:39.000Z | decorator_pattern/starbuzz/condiment.py | garyeechung/design-pattern-practice | 00ca66b79773de06c2d043c33caf37cb5f40a507 | [
"MIT"
]
 | null | null | null |
from .interface import Beverage, CondimentDecorator
class Mocha(CondimentDecorator):
def __init__(self, beverage: Beverage):
super().__init__(beverage)
self._cost = 0.2
class Whip(CondimentDecorator):
def __init__(self, beverage: Beverage):
super().__init__(beverage)
self._cost = 0.1
class Soy(CondimentDecorator):
def __init__(self, beverage: Beverage):
super().__init__(beverage)
self._cost = 0.15
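# --- Illustrative usage sketch added by the editor (not part of the original package).
# `espresso` stands for some concrete Beverage defined elsewhere (not in this module);
# each wrapper simply records its own _cost (0.2, 0.1 or 0.15) on top of the drink it wraps.
def _example(espresso: Beverage) -> Beverage:
    return Whip(Mocha(espresso))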
| 20.434783 | 51 | 0.67234 | 409 | 0.870213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6f480b5d92cd89679ad9577e9f8230981a8ae4ea | 1,641 | py | Python | src/geo_testing/test_scripts/psgs_big.py | hpgl/hpgl | 72d8c4113c242295de740513093f5779c94ba84a | [
"BSD-3-Clause"
]
| 70 | 2015-01-21T12:24:50.000Z | 2022-03-16T02:10:45.000Z | src/geo_testing/test_scripts/psgs_big.py | hpgl/hpgl | 72d8c4113c242295de740513093f5779c94ba84a | [
"BSD-3-Clause"
]
| 8 | 2015-04-22T13:14:30.000Z | 2021-11-23T12:16:32.000Z | src/geo_testing/test_scripts/psgs_big.py | hpgl/hpgl | 72d8c4113c242295de740513093f5779c94ba84a | [
"BSD-3-Clause"
]
 | 18 | 2015-02-15T18:04:31.000Z | 2021-01-16T08:54:32.000Z |
#
#
# Copyright 2009 HPGL Team
#
# This file is part of HPGL (High Perfomance Geostatistics Library).
#
# HPGL is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2 of the License.
#
# HPGL is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with HPGL. If not, see http://www.gnu.org/licenses/.
#
from geo import *
from sys import *
import os
import time
if not os.path.exists("results/"):
os.mkdir("results/")
if not os.path.exists("results/medium/"):
os.mkdir("results/medium/")
#grid = SugarboxGrid(166, 141, 225)
#prop = load_cont_property("test_data/BIG_HARD_DATA.INC", -99)
grid = SugarboxGrid(166, 141, 20)
prop = load_cont_property("test_data/BIG_SOFT_DATA_CON_160_141_20.INC",-99)
sgs_params = {
"prop": prop,
"grid": grid,
"seed": 3439275,
"kriging_type": "sk",
"radiuses": (20, 20, 20),
"max_neighbours": 12,
"covariance_type": covariance.exponential,
"ranges": (10, 10, 10),
"sill": 0.4
}
for x in xrange(1):
time1 = time.time()
psgs_result = sgs_simulation(workers_count = x+2, use_new_psgs = True, **sgs_params)
time2 = time.time()
print "Workers: %s" % (x+2)
print "Time: %s" % (time2 - time1)
write_property(psgs_result, "results/medium/PSGS_workers_1.inc", "PSIS_MEDIUM_workers_1", -99)
| 33.489796 | 229 | 0.702011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,033 | 0.629494 |
6f484367a2e17cf732eb810bd88c47b5caccd1c1 | 166 | py | Python | app/src/constants.py | hubacekjirka/dailyPhotoTwitterBot | abd490b73603883d4e71bfa6076e9925a055fcb7 | [
"MIT"
]
| 1 | 2020-03-16T10:51:07.000Z | 2020-03-16T10:51:07.000Z | app/src/constants.py | hubacekjirka/dailyPhotoTwitterBot | abd490b73603883d4e71bfa6076e9925a055fcb7 | [
"MIT"
]
| 6 | 2019-08-11T10:00:36.000Z | 2021-06-02T00:18:58.000Z | app/src/constants.py | hubacekjirka/dailyPhotoTwitterBot | abd490b73603883d4e71bfa6076e9925a055fcb7 | [
"MIT"
]
 | 2 | 2019-09-30T18:45:47.000Z | 2021-01-09T10:38:14.000Z |
friendly_camera_mapping = {
"GM1913": "Oneplus 7 Pro",
"FC3170": "Mavic Air 2",
# An analogue scanner in FilmNeverDie
"SP500": "Canon AE-1 Program"
}
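# --- Illustrative helper added by the editor (not part of the original file).
# It shows the intended lookup: translate a raw EXIF camera model string into the
# friendly name above, falling back to the raw value for unlisted cameras.
def friendly_camera_name(exif_model: str) -> str:
    return friendly_camera_mapping.get(exif_model, exif_model)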
| 23.714286 | 41 | 0.638554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.650602 |
6f486f62f9567ab5d28e26f5db6697fa139744ec | 1,622 | py | Python | refined/refinement_types.py | espetro/refined | c2f38418268e8d89634ede1265d869d8d54dc9d4 | [
"MIT"
]
| 4 | 2021-10-04T19:53:04.000Z | 2021-12-17T07:08:42.000Z | refined/refinement_types.py | espetro/refined | c2f38418268e8d89634ede1265d869d8d54dc9d4 | [
"MIT"
]
| null | null | null | refined/refinement_types.py | espetro/refined | c2f38418268e8d89634ede1265d869d8d54dc9d4 | [
"MIT"
]
 | null | null | null |
from typing_extensions import Annotated, TypeGuard
from typing import TypeVar, List, Set, Dict
from refined.predicates import (
PositivePredicate,
NegativePredicate,
ValidIntPredicate,
ValidFloatPredicate,
EmptyPredicate,
NonEmptyPredicate,
TrimmedPredicate,
IPv4Predicate,
IPv6Predicate,
XmlPredicate,
CsvPredicate
)
__all__ = [
# numeric types
'Positive',
'Negative',
# string types
'TrimmedString',
'ValidIntString',
'ValidFloatString',
'XmlString',
'CsvString',
'IPv4String',
'IPv6String',
# generic collection types
'Empty',
'NonEmpty',
# concrete collection types
'NonEmptyString',
'NonEmptyList',
'NonEmptySet',
'NonEmptyDict',
]
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
Positive = Annotated[_T1, PositivePredicate[_T1]]
Negative = Annotated[_T1, NegativePredicate[_T1]]
TrimmedString = Annotated[str, TrimmedPredicate[str]]
ValidIntString = Annotated[str, ValidIntPredicate[str]]
ValidFloatString = Annotated[str, ValidFloatPredicate[str]]
XmlString = Annotated[str, XmlPredicate[str]]
CsvString = Annotated[str, CsvPredicate[str]]
IPv4String = Annotated[str, IPv4Predicate[str]]
IPv6String = Annotated[str, IPv6Predicate[str]]
Empty = Annotated[_T1, EmptyPredicate[_T1]]
NonEmpty = Annotated[_T1, NonEmptyPredicate[_T1]]
NonEmptyString = Annotated[str, NonEmptyPredicate[str]]
NonEmptyList = Annotated[List[_T1], NonEmptyPredicate[List[_T1]]]
NonEmptySet = Annotated[Set[_T1], NonEmptyPredicate[Set[_T1]]]
NonEmptyDict = Annotated[Dict[_T1, _T2], NonEmptyPredicate[Dict[_T1, _T2]]]
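# --- Illustrative usage sketch added by the editor (not part of the original library).
# The aliases are meant to be used as annotations like this; whether the predicates are
# enforced at runtime depends on the validators in refined.predicates, not on this example.
def _describe_endpoint(host: NonEmptyString, port: Positive) -> TrimmedString:
    return f"{host}:{port}"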
| 25.34375 | 75 | 0.727497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.173243 |
6f4befaddb9a5f3e1b6a96cd0450bb3e135fa72a | 1,151 | py | Python | setup.py | evamvid/SpotPRIS2 | 4def72c626ac4184fbfb5741ae1f5616f9c34245 | [
"MIT"
]
| null | null | null | setup.py | evamvid/SpotPRIS2 | 4def72c626ac4184fbfb5741ae1f5616f9c34245 | [
"MIT"
]
| null | null | null | setup.py | evamvid/SpotPRIS2 | 4def72c626ac4184fbfb5741ae1f5616f9c34245 | [
"MIT"
]
 | null | null | null |
from setuptools import setup
with open("README.md", "r") as f:
long_description = f.read()
setup(name="SpotPRIS2",
version='0.3.1',
author="Adrian Freund",
author_email="[email protected]",
url="https://github.com/freundTech/SpotPRIS2",
description="MPRIS2 interface for Spotify Connect",
long_description=long_description,
packages=['spotpris2'],
package_dir={'spotpris2': "spotpris2"},
package_data={'spotpris2': ['mpris/*.xml', 'html/*.html']},
install_requires=[
"PyGObject",
"pydbus",
"spotipy>=2.8",
"appdirs",
],
entry_points={
'console_scripts': ["spotpris2=spotpris2.__main__:main"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Multimedia :: Sound/Audio",
],
python_requires='>=3.6',
)
| 31.972222 | 66 | 0.569939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.509123 |
6f4c1702195066e993129a8eb57596bee6bd8234 | 2,371 | py | Python | partycipe/migrations/0001_initial.py | spexxsoldier51/PartyCipe | 5b8038db408fca1e1d568d6520daaf04889ccef0 | [
"CC0-1.0"
]
| null | null | null | partycipe/migrations/0001_initial.py | spexxsoldier51/PartyCipe | 5b8038db408fca1e1d568d6520daaf04889ccef0 | [
"CC0-1.0"
]
| null | null | null | partycipe/migrations/0001_initial.py | spexxsoldier51/PartyCipe | 5b8038db408fca1e1d568d6520daaf04889ccef0 | [
"CC0-1.0"
]
 | null | null | null |
# Generated by Django 4.0.3 on 2022-04-02 17:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='cocktail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('id_api', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='party',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('paypal', models.URLField()),
('name', models.CharField(max_length=50)),
('resume', models.CharField(max_length=500)),
('place', models.CharField(max_length=150)),
('datehour', models.DateTimeField()),
('last_updated', models.DateTimeField(auto_now=True)),
('price', models.FloatField()),
('drink', models.ManyToManyField(to='partycipe.cocktail')),
('organisate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='participate',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('etat', models.BooleanField()),
('party', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='partycipe.party')),
('utilisateur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 43.907407 | 126 | 0.578237 | 2,204 | 0.929566 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.129481 |
6f4c2801d9bd553871475afb7b65130adbc0816c | 7,963 | py | Python | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
]
| 3 | 2019-02-03T00:11:34.000Z | 2020-12-08T03:12:29.000Z | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
]
| null | null | null | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
]
 | 8 | 2019-03-08T03:20:24.000Z | 2021-12-29T09:12:54.000Z |
#!/usr/bin/env python
# Filename: raster_statistic
"""
introduction: conduct statistic based on vectos, similar to https://github.com/perrygeo/python-rasterstats,
# but allow image tiles (multi-raster).
authors: Huang Lingcao
email:[email protected]
add time: 02 March, 2021
"""
import os,sys
import vector_gpd
from shapely.geometry import mapping # transform to GeJSON format
import raster_io
import basic_src.io_function as io_function
import basic_src.map_projection as map_projection
import basic_src.basic as basic
import numpy as np
from multiprocessing import Pool
def array_stats(in_array, stats, nodata,range=None):
data_1d = in_array.flatten()
data_1d = data_1d[ data_1d != nodata]
data_1d = data_1d[~np.isnan(data_1d)] # remove nan value
if range is not None:
lower = range[0]
upper = range[1]
if lower is None:
data_1d = data_1d[data_1d <= upper]
elif upper is None:
data_1d = data_1d[data_1d >= lower]
else:
data_1d = data_1d[np.logical_and( data_1d >= lower, data_1d <= upper ) ]
# https://numpy.org/doc/stable/reference/routines.statistics.html
out_value_dict = {}
for item in stats:
if item == 'mean':
value = np.mean(data_1d)
elif item == 'max':
value = np.max(data_1d)
elif item == 'min':
value = np.min(data_1d)
elif item == 'median':
value = np.median(data_1d)
elif item == 'count':
value = data_1d.size
elif item =='std':
value = np.std(data_1d)
else:
raise ValueError('unsupported stats: %s'%item)
out_value_dict[item] = value
return out_value_dict
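# --- Illustrative example added by the editor (not part of the original file).
# A tiny, self-contained demonstration of array_stats on synthetic data; the nodata
# value (0) and the value range are arbitrary choices made for the example.
def _array_stats_example():
    demo = np.array([[0, 1, 2], [3, 4, 5]])
    return array_stats(demo, ['mean', 'count', 'max'], 0, range=(1, 4))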
def zonal_stats_one_polygon(idx, polygon, image_tiles, img_tile_polygons, stats, nodata=None,range=None,
band = 1,all_touched=True):
overlap_index = vector_gpd.get_poly_index_within_extent(img_tile_polygons, polygon, min_overlap_area=0.01)
image_list = [image_tiles[item] for item in overlap_index]
if len(image_list) == 1:
out_image, out_tran,nodata = raster_io.read_raster_in_polygons_mask(image_list[0], polygon, nodata=nodata,
all_touched=all_touched,bands=band)
elif len(image_list) > 1:
# for the case it overlap more than one raster, need to produce a mosaic
tmp_saved_files = []
for k_img, image_path in enumerate(image_list):
# print(image_path)
tmp_save_path = os.path.splitext(os.path.basename(image_path))[0] + '_subset_poly%d'%idx +'.tif'
_, _,nodata = raster_io.read_raster_in_polygons_mask(image_path, polygon,all_touched=all_touched,nodata=nodata,
bands=band, save_path=tmp_save_path)
tmp_saved_files.append(tmp_save_path)
# mosaic files in tmp_saved_files
save_path = 'raster_for_poly%d.tif'%idx
mosaic_args_list = ['gdal_merge.py', '-o', save_path,'-n',str(nodata),'-a_nodata',str(nodata)]
mosaic_args_list.extend(tmp_saved_files)
if basic.exec_command_args_list_one_file(mosaic_args_list,save_path) is False:
raise IOError('error, obtain a mosaic (%s) failed'%save_path)
# read the raster
out_image, out_nodata = raster_io.read_raster_one_band_np(save_path,band=band)
# remove temporal raster
tmp_saved_files.append(save_path)
for item in tmp_saved_files:
io_function.delete_file_or_dir(item)
else:
basic.outputlogMessage('warning, cannot find raster for %d (start=0) polygon'%idx)
return None
# do calculation
return array_stats(out_image, stats, nodata,range=range)
def zonal_stats_multiRasters(in_shp, raster_file_or_files, nodata=None, band = 1, stats = None, prefix='',
range=None,all_touched=True, process_num=1):
'''
zonal statistic based on vectors, along multiple rasters (image tiles)
Args:
in_shp: input vector file
raster_file_or_files: a raster file or multiple rasters
nodata:
band: band
stats: like [mean, std, max, min]
range: interested values [min, max], None means infinity
all_touched:
process_num: process number for calculation
Returns:
'''
io_function.is_file_exist(in_shp)
if stats is None:
basic.outputlogMessage('warning, No input stats, set to ["mean"])')
stats = ['mean']
if isinstance(raster_file_or_files,str):
io_function.is_file_exist(raster_file_or_files)
image_tiles = [raster_file_or_files]
elif isinstance(raster_file_or_files,list):
image_tiles = raster_file_or_files
else:
raise ValueError('unsupport type for %s'%str(raster_file_or_files))
# check projection (assume we have the same projection), check them outside this function
# get image box
img_tile_boxes = [raster_io.get_image_bound_box(tile) for tile in image_tiles]
img_tile_polygons = [vector_gpd.convert_image_bound_to_shapely_polygon(box) for box in img_tile_boxes]
polygons = vector_gpd.read_polygons_gpd(in_shp)
if len(polygons) < 1:
basic.outputlogMessage('No polygons in %s'%in_shp)
return False
# polygons_json = [mapping(item) for item in polygons] # no need when use new verion of rasterio
    # process polygons one by one with their corresponding image tiles (parallel, and saves memory)
# also to avoid error: daemonic processes are not allowed to have children
if process_num == 1:
stats_res_list = []
for idx, polygon in enumerate(polygons):
out_stats = zonal_stats_one_polygon(idx, polygon, image_tiles, img_tile_polygons, stats, nodata=nodata, range=range,
band=band, all_touched=all_touched)
stats_res_list.append(out_stats)
elif process_num > 1:
threadpool = Pool(process_num)
para_list = [ (idx, polygon, image_tiles, img_tile_polygons, stats, nodata, range,band, all_touched)
for idx, polygon in enumerate(polygons)]
stats_res_list = threadpool.starmap(zonal_stats_one_polygon,para_list)
else:
raise ValueError('Wrong process number: %s '%str(process_num))
# save to shapefile
add_attributes = {}
new_key_list = [ prefix + '_' + key for key in stats_res_list[0].keys()]
for new_ley in new_key_list:
add_attributes[new_ley] = []
for stats_result in stats_res_list:
for key in stats_result.keys():
add_attributes[prefix + '_' + key].append(stats_result[key])
vector_gpd.add_attributes_to_shp(in_shp,add_attributes)
pass
def test_zonal_stats_multiRasters():
shp = os.path.expanduser('~/Data/Arctic/canada_arctic/Willow_River/Willow_River_Thaw_Slumps.shp')
# save_shp = os.path.basename(io_function.get_name_by_adding_tail(shp,'raster_stats'))
# a single DEM
# dem_file_dir = os.path.expanduser('~/Data/Arctic/canada_arctic/DEM/WR_dem_ArcticDEM_mosaic')
# dem_path = os.path.join(dem_file_dir,'WR_extent_2m_v3.0_ArcticTileDEM_sub_1_prj.tif')
# dem patches
dem_file_dir = os.path.expanduser('~/Data/Arctic/canada_arctic/DEM/WR_dem_ArcticDEM_mosaic/dem_patches')
dem_list = io_function.get_file_list_by_ext('.tif',dem_file_dir,bsub_folder=False)
save_shp = os.path.basename(io_function.get_name_by_adding_tail(shp, 'multi_raster_stats'))
io_function.copy_shape_file(shp, save_shp)
zonal_stats_multiRasters(save_shp, dem_list, nodata=None, band=1, stats=None, prefix='dem',
range=None, all_touched=True, process_num=4)
def main():
test_zonal_stats_multiRasters()
pass
if __name__=='__main__':
basic.setlogfile('raster_statistic.log')
main() | 39.034314 | 128 | 0.672485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,253 | 0.282934 |
6f4f6ae38349e41996b32d4d35373858a72fda8b | 1,226 | py | Python | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
]
| 2 | 2016-03-15T15:07:06.000Z | 2016-05-10T23:01:05.000Z | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
]
| null | null | null | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
]
 | null | null | null |
# Defutils.py -- Contains parsing functions for definition files.
# Produces an organized list of tokens in the file.
def parse(filename):
f=open(filename, "r")
contents=f.read()
f.close()
# Tokenize the file:
#contents=contents.replace('\t', '\n')
lines=contents.splitlines()
outList=[]
for l in lines:
if l[len(l)-1]==':':
outList.append([l.rstrip(':')])
elif l!="":
outList[len(outList)-1].append(l)
return outList
def search(tokList, key):
for tok in tokList:
if tok[0]==key:
return tok
return []
def write(tokList, filename):
f=open(filename, "w")
for tok in tokList:
f.write(tok[0]+":\n")
for i in range(1, len(tok), 1):
f.write(tok[i]+"\n")
f.close()
class InvalidTypeException(Exception):
    typestr = ""
    def __init__(self, value="File cannot be read properly."):
        self.typestr = value  # assign to the instance; the bare local assignment was silently discarded
    def __str__(self):
        return "InvalidTypeException: " + self.typestr
class DefFormatException(Exception):
    typestr = ""
    def __init__(self, value="Definition format error."):
        self.typestr = value
    def __str__(self):
        return "DefFormatException: " + self.typestr
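# --- Illustrative usage sketch added by the editor (not part of the original file).
# The file name and section name below are made up; they only show the expected
# "section:" header followed by indented value lines.
if __name__ == "__main__":
    with open("example.def", "w") as f:
        f.write("inputs:\n\tfoo.txt\n\tbar.txt")
    tokens = parse("example.def")
    print(search(tokens, "inputs"))
    write(tokens, "example_copy.def")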
| 22.703704 | 65 | 0.596248 | 411 | 0.335237 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.247961 |
6f501da078c8264f3aa97dd237a4fd33b8efc2d3 | 492 | py | Python | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
]
| null | null | null | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
]
| null | null | null | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
]
 | null | null | null |
#!/usr/bin/env python3
import json
import sys
import clize
def get_file_handler(input_file):
if input_file is None:
return sys.stdin
return open(input_file, 'r')
@clize.clize
def main(input_file=None):
result = {'datas': []}
with get_file_handler(input_file) as input_handler:
result = json.load(input_handler)
for data in result['datas']:
print(data['filename'])
if __name__ == '__main__':
clize.run(main)
| 17.571429 | 56 | 0.628049 | 0 | 0 | 0 | 0 | 237 | 0.481707 | 0 | 0 | 60 | 0.121951 |
6f51e1b0451d36b7c6fa181d10bcac54b6aff907 | 2,254 | py | Python | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
]
| null | null | null | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
]
| null | null | null | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
]
 | null | null | null |
"""
TutorialMirror
A simple mirror object to experiment with.
"""
from evennia import DefaultObject
from evennia.utils import make_iter, is_iter
from evennia import logger
class TutorialMirror(DefaultObject):
"""
A simple mirror object that
- echoes back the description of the object looking at it
- echoes back whatever is being sent to its .msg - to the
sender, if given, otherwise to the location of the mirror.
"""
def return_appearance(self, looker, **kwargs):
"""
This formats the description of this object. Called by the 'look' command.
Args:
looker (Object): Object doing the looking.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if isinstance(looker, self.__class__):
# avoid infinite recursion by having two mirrors look at each other
return "The image of yourself stretches into infinity."
return f"{self.key} shows your reflection:\n{looker.db.desc}"
def msg(self, text=None, from_obj=None, **kwargs):
"""
Simply override .msg to echo back to the messenger or to the current
location.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
            from_obj (obj or iterable, optional): The sender(s) of the message. If
                given, at_msg_send will be called. This value will be
passed on to the protocol. If iterable, will execute hook
on all entities in it.
"""
if not text:
text = "<silence>"
text = text[0] if is_iter(text) else text
if from_obj:
for obj in make_iter(from_obj):
obj.msg(f'{self.key} echoes back to you:\n"{text}".')
elif self.location:
self.location.msg_contents(f'{self.key} echoes back:\n"{text}".', exclude=[self])
else:
# no from_obj and no location, just log
logger.log_msg(f"{self.key}.msg was called without from_obj and .location is None.")
| 35.777778 | 96 | 0.612689 | 2,076 | 0.921029 | 0 | 0 | 0 | 0 | 0 | 0 | 1,556 | 0.690328 |
6f52486edca89f5433834a3b9b6ee311e8cbfc7a | 1,087 | py | Python | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
]
| null | null | null | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
]
| null | null | null | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
]
 | null | null | null |
from tkinter import Tk
from turtle import ScrolledCanvas, TurtleScreen, RawTurtle
DIGIT2POS = dict(zip(
"123456789",
((100 * (j - 1), 100 * (-i + 1)) for i in range(3) for j in range(3))
))
def draw_dots(turt: RawTurtle) -> None:
penstate = turt.pen()
turt.penup()
for x, y in DIGIT2POS.values():
turt.setheading(turt.towards(x, y))
turt.goto(x, y)
turt.dot()
turt.pen(pen = penstate)
def draw_pattern(turt: RawTurtle, pattern: str) -> None:
penstate = turt.pen()
turt.penup()
for x, y in map(lambda digit: DIGIT2POS[digit], pattern):
turt.setheading(turt.towards(x, y))
turt.goto(x, y)
turt.pendown()
turt.dot()
turt.pen(pen = penstate)
def main(pattern: str) -> None:
master = Tk()
canvas = ScrolledCanvas(master)
canvas.pack()
screen = TurtleScreen(canvas)
screen.colormode(255)
turt = RawTurtle(screen)
draw_dots(turt)
turt.pencolor((178, 34, 34))
draw_pattern(turt, pattern)
screen.mainloop()
if __name__ == "__main__":
main("61834927")
| 25.880952 | 73 | 0.618215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.028519 |
6f52a901e875d32b20f9451889c4b2196619f283 | 3,879 | py | Python | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
]
| null | null | null | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
]
| null | null | null | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
]
 | null | null | null |
#!/usr/bin/env python
# test code for PyPortMidi
# a port of a subset of test.c provided with PortMidi
# John Harrison
# harrison [at] media [dot] mit [dot] edu
# March 15, 2005: accommodate for SysEx messages and preferred list formats
# SysEx test code contributed by Markus Pfaff
# February 27, 2005: initial release
import pypm
import array
import time
NUM_MSGS = 100 # number of MIDI messages for input before closing
INPUT=0
OUTPUT=1
def PrintDevices(InOrOut):
for loop in range(pypm.CountDevices()):
interf,name,inp,outp,opened = pypm.GetDeviceInfo(loop)
if ((InOrOut == INPUT) & (inp == 1) |
(InOrOut == OUTPUT) & (outp ==1)):
print loop, name," ",
if (inp == 1): print "(input) ",
else: print "(output) ",
if (opened == 1): print "(opened)"
else: print "(unopened)"
print
def TestInput():
PrintDevices(INPUT)
dev = int(raw_input("Type input number: "))
MidiIn = pypm.Input(dev)
print "Midi Input opened. Reading ",NUM_MSGS," Midi messages..."
# MidiIn.SetFilter(pypm.FILT_ACTIVE | pypm.FILT_CLOCK)
for cntr in range(1,NUM_MSGS+1):
while not MidiIn.Poll(): pass
MidiData = MidiIn.Read(1) # read only 1 message at a time
print "Got message ",cntr,": time ",MidiData[0][1],", ",
print MidiData[0][0][0]," ",MidiData[0][0][1]," ",MidiData[0][0][2], MidiData[0][0][3]
# NOTE: most Midi messages are 1-3 bytes, but the 4 byte is returned for use with SysEx messages.
del MidiIn
def TestOutput():
latency = int(raw_input("Type latency: "))
print
PrintDevices(OUTPUT)
dev = int(raw_input("Type output number: "))
MidiOut = pypm.Output(dev, latency)
print "Midi Output opened with ",latency," latency"
dummy = raw_input("ready to send program 1 change... (type RETURN):")
MidiOut.Write([[[0xc0,0,0],pypm.Time()]])
dummy = raw_input("ready to note-on... (type RETURN):")
MidiOut.Write([[[0x90,60,100],pypm.Time()]])
dummy = raw_input("read to note-off... (type RETURN):")
MidiOut.Write([[[0x90,60,0],pypm.Time()]])
dummy = raw_input("ready to note-on (short form)... (type RETURN):")
MidiOut.WriteShort(0x90,60,100)
dummy = raw_input("ready to note-off (short form)... (type RETURN):")
MidiOut.WriteShort(0x90,60,0)
print
print "chord will arpeggiate if latency > 0"
dummy = raw_input("ready to chord-on/chord-off... (type RETURN):")
chord = [60, 67, 76, 83, 90]
ChordList = []
MidiTime = pypm.Time()
for i in range(len(chord)):
ChordList.append([[0x90,chord[i],100], MidiTime + 1000 * i])
MidiOut.Write(ChordList)
while pypm.Time() < MidiTime + 1000 + len(chord) * 1000 : pass
ChordList = []
# seems a little odd that they don't update MidiTime here...
for i in range(len(chord)):
ChordList.append([[0x90,chord[i],0], MidiTime + 1000 * i])
MidiOut.Write(ChordList)
print("Sending SysEx messages...")
# sending with timestamp = 0 should be the same as sending with
# timestamp = pypm.Time()
dummy = raw_input("ready to send a SysEx string with timestamp = 0 ... (type RETURN):")
MidiOut.WriteSysEx(0,'\xF0\x7D\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\xF7')
dummy = raw_input("ready to send a SysEx list with timestamp = pypm.Time() ... (type RETURN):")
MidiOut.WriteSysEx(pypm.Time(), [0xF0, 0x7D, 0x10, 0x11, 0x12, 0x13, 0xF7])
dummy = raw_input("ready to close and terminate... (type RETURN):")
del MidiOut
# main code begins here
pypm.Initialize() # always call this first, or OS may crash when you try to open a stream
x=0
while (x<1) | (x>2):
print """
enter your choice...
1: test input
2: test output
"""
x=int(raw_input())
if x==1: TestInput()
else: TestOutput()
pypm.Terminate()
| 38.405941 | 105 | 0.63109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,672 | 0.431039 |
6f541abd19a1111ebb51682e3f0933b11c46ab1e | 1,043 | py | Python | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
]
| null | null | null | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
]
| null | null | null | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
]
| null | null | null | #coding=utf-8
import pandas as pd
def get_labels(init_file,predict_file):
init_label = []
predict_label = []
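    # Ground-truth file is '^'-separated and predictions are ','-separated;
    # the label sits in column 0 of both files.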
pd_init = pd.read_csv(init_file,sep="^",header=None)
for index,row in pd_init.iterrows():
init_label.append(row[0])
pd_predict = pd.read_csv(predict_file,sep=",",header=None)
for index,row in pd_predict.iterrows():
predict_label.append(row[0])
print(predict_label)
print(init_label)
correct_count = 0
error_index = []
for i in range(len(init_label)):
if init_label[i] == predict_label[i]:
correct_count += 1
else:
error_index.append(i)
print("correct_count : "+str(correct_count))
correct_rate = correct_count / len(pd_predict)
return correct_rate,error_index
if __name__ == "__main__":
correct_rate,error_index = get_labels("../processed_data/dev_processed_data_split.csv","./result/result_predict.txt")
print("correct_rate : "+str(correct_rate))
print("error_email : "+str(error_index))
| 21.729167 | 121 | 0.663471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.151486 |
6f54410348912d7242d35c3b0676a8ad9e832eda | 1,219 | py | Python | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
]
| null | null | null | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
]
| null | null | null | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
]
| null | null | null | from ex01.funcoes import *
def arqExiste(nome):
try:
a = open(nome, 'rt') #rt = read text
a.close()
except FileNotFoundError:
return False
else:
return True
def criarArq(nome):
try:
        a = open(nome, 'wt+') # wt = write text; + = create the file if it does not exist
        a.close()
    except:
        print('Houve um erro na criacao do arquivo')
else:
print('Arquivo criado com sucesso')
def lerArq(nome):
try:
a = open(nome, 'rt')
    except:
        print('Houve um erro na leitura do arquivo')
    else:
        cabecalho('PESSOAS CADASTRADAS')
        for linha in a:
            dado = linha.split(';')
            dado[1] = dado[1].replace('\n', '')
            print(f'{dado[0]:<30}{dado[1]:>3} anos')
        a.close()
def cadastrar(arquivo, nome='desconhecido', idade=0):
try:
a = open(arquivo, 'at') # at = append no txt
except:
print('Houve um erro ao cadastrar')
else:
try:
a.write(f'{nome};{idade}\n')
except:
print('Houve erro ao executar a.write')
else:
print('Novo registro adicionado com sucesso')
a.close()
| 23 | 82 | 0.528302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.326497 |
6f54793f102a2f9346990845e8357d9f1db537d3 | 4,330 | py | Python | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
]
| 3 | 2022-02-08T16:11:43.000Z | 2022-03-23T16:18:59.000Z | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
]
| null | null | null | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
]
| 2 | 2022-02-01T05:35:56.000Z | 2022-02-10T01:37:38.000Z | # -*- coding: utf-8 -*-
"""
:author @Icrons
cron: 20 10 * * *
new Env('机场签到');
"""
import json
import re
import traceback
import requests
import urllib3
from notify_mtr import send
from utils import get_data
urllib3.disable_warnings()
class SspanelQd(object):
def __init__(self, check_items):
self.check_items = check_items
@staticmethod
def checkin(url, email, password):
url = url.rstrip("/")
email = email.split("@")
if len(email) > 1:
email = email[0] + "%40" + email[1]
else:
email = email[0]
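        # '@' is URL-encoded as %40 because the credentials are sent form-encoded.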
session = requests.session()
"""
        The except clauses below catch exceptions raised by the requests calls,
        so the script can wait out network problems and keep running uninterrupted.
"""
try:
session.get(url, verify=False)
except requests.exceptions.ConnectionError:
msg = url + "\n" + "网络不通"
return msg
except requests.exceptions.ChunkedEncodingError:
msg = url + "\n" + "分块编码错误"
return msg
except Exception:
msg = url + "\n" + "未知错误,请查看日志"
print(f"未知错误,错误信息:\n{traceback.format_exc()}")
return msg
login_url = url + "/auth/login"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
post_data = "email=" + email + "&passwd=" + password + "&code="
post_data = post_data.encode()
try:
res = session.post(login_url, post_data, headers=headers, verify=False)
res_str = res.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口登录返回信息:{res_str}")
res_dict = json.loads(res_str)
if res_dict.get("ret") == 0:
msg = url + "\n" + str(res_dict.get("msg"))
return msg
except Exception:
msg = url + "\n" + "登录失败,请查看日志"
print(f"登录失败,错误信息:\n{traceback.format_exc()}")
return msg
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Referer": url + "/user",
}
try:
response = session.post(
url + "/user/checkin", headers=headers, verify=False
)
res_str = response.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口签到返回信息:{res_str}")
res_dict = json.loads(res_str)
check_msg = res_dict.get("msg")
if check_msg:
msg = url + "\n" + str(check_msg)
else:
msg = url + "\n" + str(res_dict)
except Exception:
msg = url + "\n" + "签到失败,请查看日志"
print(f"签到失败,错误信息:\n{traceback.format_exc()}")
info_url = url + "/user"
response = session.get(info_url, verify=False)
"""
        Only the editXY theme is supported by the parsing below.
"""
try:
level = re.findall(r'\["Class", "(.*?)"],', response.text)[0]
day = re.findall(r'\["Class_Expire", "(.*)"],', response.text)[0]
rest = re.findall(r'\["Unused_Traffic", "(.*?)"]', response.text)[0]
msg = (
url
+ "\n- 今日签到信息:"
+ str(msg)
+ "\n- 用户等级:"
+ str(level)
+ "\n- 到期时间:"
+ str(day)
+ "\n- 剩余流量:"
+ str(rest)
)
except Exception:
pass
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
            # Airport panel URL
url = str(check_item.get("url"))
            # Login credentials
email = str(check_item.get("email"))
password = str(check_item.get("password"))
if url and email and password:
msg = self.checkin(url=url, email=email, password=password)
else:
msg = "配置错误"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("AIRPORT", [])
res = SspanelQd(check_items=_check_items).main()
send("机场签到", res)
| 31.151079 | 138 | 0.505081 | 4,271 | 0.90911 | 0 | 0 | 3,643 | 0.775436 | 0 | 0 | 1,498 | 0.318859 |
6f55278da18ee1b87b293e3ecbf2009597eacc92 | 2,541 | py | Python | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
]
| null | null | null | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
]
| null | null | null | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
]
| null | null | null | # -----------------------------------------------------------------------
# Author: Marcelo Villa-Piñeros
#
# Purpose: Determines the fire season for each window. The fire season is
# defined as the minimum number of consecutive months that contain more
# than 80% of the burned area (Archibald ett al 2013; Abatzoglou et al.
# 2018).
#
# References:
# * Archibald, S., Lehmann, C. E. R., Gómez-Dans, J. L., & Bradstock,
# R. A. (2013). Defining pyromes and global syndromes of fire regimes.
# Proceedings of the National Academy of Sciences of the United States
# of America, 110(16), 6442–6447.
#
# * Abatzoglou, J. T., Williams, A. P., Boschetti, L., Zubkova, M., &
# Kolden, C. A. (2018). Global patterns of interannual climate–fire
# relationships. Global Change Biology, 24(11), 5164–5175.
# -----------------------------------------------------------------------
import os
from calendar import month_abbr
import pandas as pd
from src.utils.constants import REGIONS, BURNED_AREA_THRESHOLD
if __name__ == "__main__":
# Project's root
os.chdir("../..")
output_folder = "results/csv"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
df = pd.DataFrame(columns=["window", "months"])
for region in REGIONS:
month_groups = pd.read_excel(
f"results/xlsx/{region['name']}/fire_groups.xlsx", sheet_name="Month"
)
# Compute 80% threshold.
threshold = month_groups["area"].sum() * BURNED_AREA_THRESHOLD
# Sort months from larger to smallest burned area and compute the
# cumulative sum.
sorted_groups = month_groups.sort_values(by="area", ascending=False)
sorted_groups = sorted_groups.reset_index(drop=True)
sorted_groups["cumulative_area"] = sorted_groups["area"].cumsum()
# Get the months with the largest burned area that compose more
# than 80% of the total burned area and change from month int to
# month abbreviation.
above_threshold = sorted_groups["cumulative_area"] >= threshold
fire_season_months = sorted_groups["month"].loc[:above_threshold.idxmax()]
fire_season_months = fire_season_months.sort_values()
fire_season_months = fire_season_months.apply(lambda x: month_abbr[x])
months = fire_season_months.str.cat(sep="-")
df = df.append({"window": region["name"], "months": months}, ignore_index=True)
save_to = os.path.join(output_folder, "fire_season_months.csv")
df.to_csv(save_to, index=False)
| 38.5 | 87 | 0.646989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,350 | 0.529619 |
6f5611d11711ac2d42a20770b0203d11ed9c22de | 5,719 | py | Python | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
]
| 1 | 2021-08-01T02:26:59.000Z | 2021-08-01T02:26:59.000Z | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
]
| null | null | null | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
]
| null | null | null | import bpy
import json
from bpy.types import SpaceView3D
from bpy.app.handlers import persistent
from mathutils import Quaternion, Matrix, Vector
from holo.gestures import prediction_from_camera
def duplicate_window(window_type: str = 'INVOKE_DEFAULT') -> None:
"""Duplicates a new window into bpy.data.screens from current active window."""
context_window = bpy.context.copy()
context_window['area'] = [area for area in bpy.context.screen.areas if area.type == 'VIEW_3D'][0]
bpy.ops.screen.area_dupli(context_window, window_type)
def convert_quadview(area: SpaceView3D) -> None:
"""Converts a given window into quad-view."""
    region = [region for region in area.regions if region.type == 'WINDOW'][0]
    override = {'area': area, 'region': region, 'edit_object': bpy.context.edit_object}
bpy.ops.screen.region_quadview(override)
def configure_scene(screen_data: SpaceView3D) -> None:
"""Removes all overlay elements from the 3D viewport."""
screen_data.shading.background_type = 'VIEWPORT'
screen_data.shading.background_color = (0, 0, 0)
screen_data.overlay.show_overlays = False
for attribute in 'show_gizmo', 'show_region_toolbar', 'show_region_tool_header':
setattr(screen_data, attribute, False)
def initial_config(values: list) -> None:
"""Sets the camera position and rotation values during initialization of new frame."""
for index, window in enumerate(values):
for key, attribute in window.items():
if key not in {'perspective_matrix', 'window_matrix'}: # BUG These values are read only and need a setter
setattr(QUAD_VIEWS[index], key, attribute)
def transform_rotate(direction: 'str', confidence: int) -> None:
"""Given a direction and confidence value (Out of 100%), rotate the object by its corresponding vector."""
magnitude = confidence / 100
if direction not in {'retract', 'expand'}:
bpy.ops.transform.rotate(
value=magnitude,
orient_axis='Z',
orient_type='VIEW',
orient_matrix=((0.85153, 0.277963, -0.44456),
(0.15535, 0.676067, 0.720278),
(0.500763, -0.6824, 0.53251)),
orient_matrix_type='VIEW',
mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH',
proportional_size=1,
use_proportional_connected=False,
use_proportional_projected=False)
else:
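        # 'expand'/'retract' zoom instead of rotate: adjust every quad view's distance.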
for window in QUAD_VIEWS:
window.view_distance += magnitude if direction == 'expand' else magnitude * -1
def get_gestures() -> None:
"""Retrieves gestures from camera and applies the corresponding tranformation to the object."""
rotation_mapping = {
'Fist' : 'X',
'L' : 'Y',
'Okay' : 'Z',
}
for gesture in prediction_from_camera():
        transform_rotate(direction=rotation_mapping[gesture.gesture], confidence=gesture.confidence)
def initial_config_values() -> list:
"""Returns initial config values as a convenience utility."""
return [
{
"view_distance": 4.183098793029785,
"view_location": Vector((-0.8385156989097595, 0.05902576446533203, 0.48941677808761597)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.6414357423782349, -0.6326250433921814, 0.3170725703239441, 0.2963286340236664))
},
{
"view_distance": 4.183099269866943,
"view_location": Vector((-0.4491613209247589, 1.5609432458877563, 0.014791678637266159)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.4915403723716736, 0.6154682636260986, -0.25714513659477234, -0.559877872467041)),
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((-0.9179283380508423, -0.46830159425735474, 0.334771990776062)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((-0.22622741758823395, 0.6814441084861755, -0.1789524108171463, 0.6726300716400146))
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((0.797123372554779, 0.7804675102233887, 0.635741114616394)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.687656581401825, 0.6367506384849548, -0.2974682152271271, 0.1821804791688919))
}
]
if __name__ == '__main__':
duplicate_window()
RENDER_AREA = bpy.data.window_managers[0].windows[-1].screen.areas[0]
MAIN_VIEW = [area for area in bpy.data.window_managers[0].windows[0].screen.areas if area.type == 'VIEW_3D'][0].spaces[0].region_3d
QUAD_VIEWS = RENDER_AREA.spaces[0].region_quadviews
convert_quadview(area=RENDER_AREA)
configure_scene(screen_data=RENDER_AREA.spaces[0])
initial_config(initial_config_values())
get_gestures()
# bpy.data.window_managers[0].windows[1].screen.areas[0].spaces[0].region_3d.view_rotation.rotate(Euler((1, 10, .1)))
for window in bpy.data.window_managers[0].windows: # let's find what's what
for area in window.screen.areas:
if area.type == 'VIEW_3D':
if len(area.spaces[0].region_quadviews) > 0: #if quadviews are active
quad_views = area.spaces[0].region_quadviews
else:
main_view = area.spaces[0].region_3d
@persistent # This makes it stay if another file is opened
def update_handler(dummy):
for every_view in QUAD_VIEWS:
every_view.view_location = MAIN_VIEW.view_location
every_view.view_distance = MAIN_VIEW.view_distance
bpy.app.handlers.frame_change_post.append(update_handler)
| 43.656489 | 135 | 0.673719 | 0 | 0 | 0 | 0 | 237 | 0.041441 | 0 | 0 | 1,343 | 0.234831 |
6f57b93666fc12f3542b15b4104bbd2e0df4bc2a | 2,506 | py | Python | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
]
| 2 | 2020-10-21T23:54:28.000Z | 2020-12-26T14:00:07.000Z | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
]
| null | null | null | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
def network(inputs, config):
hidden = inputs
for size in config.layer_sizes:
hidden = tf.layers.dense(hidden, size, tf.nn.leaky_relu)
mean = tf.layers.dense(hidden, 1)
noise = tf.layers.dense(hidden, 1, tf.nn.softplus) + 1e-6
uncertainty = tf.layers.dense(hidden, 1, None)
return mean, noise, uncertainty
def define_graph(config):
network_tpl = tf.make_template('network', network, config=config)
inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
targets = tf.placeholder(tf.float32, [None, 1])
num_visible = tf.placeholder(tf.int32, [])
batch_size = tf.to_float(tf.shape(inputs)[0])
data_mean, data_noise, data_uncertainty = network_tpl(inputs)
ood_inputs = inputs + tf.random_normal(
tf.shape(inputs), 0.0, config.noise_std)
ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
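  # The Gaussian-perturbed (out-of-distribution) inputs train the uncertainty
  # head to predict 1 off the data manifold and 0 on it (see the losses below).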
losses = [
-tfd.Normal(data_mean, data_noise).log_prob(targets),
-tfd.Bernoulli(data_uncertainty).log_prob(0),
-tfd.Bernoulli(ood_uncertainty).log_prob(1),
]
if config.center_at_target:
losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
optimizer = tf.train.AdamOptimizer(config.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(
loss, colocate_gradients_with_ops=True))
if config.clip_gradient:
gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
optimize = optimizer.apply_gradients(zip(gradients, variables))
data_uncertainty = tf.sigmoid(data_uncertainty)
if not config.center_at_target:
data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
return tools.AttrDict(locals())
| 41.081967 | 75 | 0.7502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.227853 |
6f59ecc61ca13580a763007d05b1e7a17bc242cb | 165 | py | Python | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
]
| 1 | 2020-08-08T17:06:43.000Z | 2020-08-08T17:06:43.000Z | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
]
| 2 | 2021-03-30T01:01:59.000Z | 2021-03-30T01:01:59.000Z | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
]
| 4 | 2020-12-22T18:13:24.000Z | 2021-11-26T13:03:45.000Z | """
DB operations for Targets
"""
from api.models.base import DBModel
class TargetDB(DBModel):
'''DBModel for the targets table'''
tablename = 'targets'
| 13.75 | 39 | 0.684848 | 91 | 0.551515 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.466667 |
6f5a67d0b0a7c3b0ab3f8a01d7d7b783ef80e5c4 | 15,107 | py | Python | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
]
| null | null | null | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
]
| null | null | null | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
]
| null | null | null | # myTeam.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
# TO DISCUSS:
# Walkthru
# Replay Func
# Agent state vs position
# Normalizing state values
# Actions vs. Legal Actions
# Reward Func
import random
import time
import math
import json
import os
from util import nearestPoint
from collections import deque
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from game import Directions
from captureAgents import CaptureAgent
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
first='OffDQNAgent', second='DefDQNAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
# The following line is an example only; feel free to change it.
return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class DQNAgent(CaptureAgent):
def registerInitialState(self, gs):
"""
This method handles the initial setup of the
agent to populate useful fields (such as what team
we're on).
A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
IMPORTANT: This method may run for at most 15 seconds.
"""
'''
Make sure you do not delete the following line. If you would like to
use Manhattan distances instead of maze distances in order to save
on initialization time, please take a look at
CaptureAgent.registerInitialState in captureAgents.py.
'''
'''
Your initialization code goes here, if you need any.
'''
print("REGISTERING INITIAL STATE... \n\n")
train = True
self.EPISODES = 10000
self.memory = deque(maxlen=2000)
self.alpha = 0.05
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.05
self.epsilon_decay = 0.999
self.learning_rate = 0.002
self.epsilon = self.epsilon_min
self.start = gs.getAgentPosition(self.index)
CaptureAgent.registerInitialState(self, gs)
self.actions = ['Stop', 'North', 'South', 'East', 'West']
cols = len(gs.data.layout.layoutText[0])
rows = len(gs.data.layout.layoutText)
self.input_shape = rows*cols
self.output_shape = len(self.actions)
        self.model = self._build_model()
        if os.path.exists('DQNAgent%d.h5' % self.index):
            self.model.load_weights('DQNAgent%d.h5' % self.index)
def _build_model(self):
# Neural Net for Deep-Q learning Model
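        # Input: flattened, normalized layout grid; output: one Q-value per action.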
model = Sequential()
model.add(Dense(64, input_dim=self.input_shape))
model.add(Dense(32))
model.add(Dense(self.output_shape, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
# DEPRECATED
def train(self, gs):
batch_size = 32
print("Beginning training ...")
for e in range(self.EPISODES):
state = gs.getAgentState(self.index)
legal_actions = gs.getLegalActions(self.index)
            best_action = self.chooseAction(gs)
next_gs = self.getSuccessor(gs, best_action)
next_state = next_gs.getAgentState(self.index)
reward = self.getReward(next_gs, gs)
            self.remember(gs, best_action, reward, next_gs)
with open("memory.json", "w") as write_file:
json.dump((self.index,
gs.getAgentPosition(self.index),
best_action, reward,
next_gs.getAgentPosition(self.index)
), write_file)
gs = next_gs
if len(self.memory) > batch_size:
self.replay(batch_size)
if (e % 100 == 0):
print("Episode: %d" % e)
self.model.save_weights("agent%d.h5" % self.index)
print('Finished Training!')
def remember(self, state, action, reward, next_state):
self.memory.append((state, action, reward, next_state))
def replay(self, batch_size):
# Samples random memories of batch_size
minibatch = random.sample(self.memory, batch_size)
# For each memory
avg_loss = []
for gs, action, reward, next_gs in minibatch:
state = gs.getAgentState(self.index)
next_state = next_gs.getAgentState(self.index)
# Update to q value
gs_q_vals = self.model.predict(self.preprocessGS(gs))
best_q_val = np.amax(gs_q_vals[0])
next_best_q_val = np.amax(
self.model.predict(self.preprocessGS(next_gs))[0])
diff = (reward + self.gamma * next_best_q_val) - best_q_val
gs_q_vals[0][self.actions.index(action)] = diff
loss = self.model.fit(self.preprocessGS(gs),
gs_q_vals, epochs=1, verbose=0)
avg_loss += loss.history['loss']
# print("Replay Avg Loss: " + str(np.average(avg_loss)))
# Decrease epsilon
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def getSuccessor(self, gs, action):
"""
Finds the next successor which is a grid position (location tuple).
"""
successor = gs.generateSuccessor(self.index, action)
pos = successor.getAgentState(self.index).getPosition()
if pos != nearestPoint(pos):
# Only half a grid position was covered
return successor.generateSuccessor(self.index, action)
else:
return successor
def chooseAction(self, gs):
"""
Picks among actions randomly.
"""
# state = gs.getAgentPosition(self.index)
# actions = gs.getLegalActions(self.index)
'''
You should change this in your own agent.
'''
"*** YOUR CODE HERE ***"
batch_size = 16
# Update memory if possible
last_gs = self.getPreviousObservation()
if last_gs:
next_gs = self.getCurrentObservation()
if next_gs.data.timeleft <= 5:
self.model.save('DQNAgent%d.h5' % self.index)
reward = self.getReward(gs, last_gs)
action = self.getDirection(last_gs.getAgentPosition(
self.index), gs.getAgentPosition(self.index))
self.memory.append((last_gs, action, reward, gs))
with open("memory.json", "w") as write_file:
json.dump((self.index,
last_gs.getAgentPosition(self.index),
action, reward,
gs.getAgentPosition(self.index)
), write_file)
# Replay
if len(self.memory) > batch_size:
self.replay(batch_size)
legal_actions = gs.getLegalActions(self.index)
# Random Action
if np.random.rand() <= self.epsilon:
best_action = random.choice(legal_actions)
# Best Action
else:
act_values = self.model.predict(self.preprocessGS(gs))
legal_actions_i = [self.actions.index(a) for a in legal_actions]
best_action = np.argmax(act_values[0][legal_actions_i])
best_action = self.actions[legal_actions_i[best_action]]
return best_action # returns action
def preprocessGS(self, gs):
data = []
layout = gs.data.layout.layoutText
# new_layout = np.zeros(((16,)))
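        # Encode the ASCII layout as floats (space=0, wall '%'=5, food '.'=6,
        # capsule 'o'=7), normalize by 7 and flatten to the network input shape.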
for i, row in enumerate(layout):
new_row = row.replace(" ", "0") \
.replace("%", "5") \
.replace(".", "6") \
.replace("o", "7")
data += [float(x) / float(7) for x in list(new_row)]
# + [str(self.actions.index(action))]
return np.reshape(np.asarray(data, dtype=float).flatten(), (1, self.input_shape))
def min_dist_to_food(self, gs, agent_pos):
food_pos = []
for i, r in enumerate(self.getFood(gs).data):
for j, c in enumerate(r):
if self.getFood(gs).data[i][j]:
food_pos += [(i, j)]
return np.min([self.getMazeDistance(agent_pos, f)
for f in food_pos])
def min_dist_to_op(self, gs, agent_pos):
op_pos = [gs.getAgentPosition(i)
for i in self.getOpponents(gs)]
return np.min([self.getMazeDistance(agent_pos, f)
for f in op_pos])
def isAgentDead(self, new_gs, old_gs):
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
op_pos = [new_gs.getAgentPosition(i)
for i in self.getOpponents(new_gs)]
if old_loc in op_pos and new_loc == self.start:
return True
return False
def isOpDead(self, new_gs, old_gs):
op_i = self.getOpponents(new_gs)
new_op_locs = [new_gs.getAgentPosition(i) for i in op_i]
old_op_locs = [old_gs.getAgentPosition(i) for i in op_i]
old_loc = old_gs.getAgentPosition(self.index)
        # An opponent counts as captured if it was on our square last turn and
        # has now jumped away (i.e. respawned far from where it just was).
        for old_op_loc, new_op_loc in zip(old_op_locs, new_op_locs):
            if old_op_loc is None or new_op_loc is None:
                continue
            if old_op_loc == old_loc and self.getMazeDistance(old_op_loc, new_op_loc) > 1:
                return True
        return False
def getDirection(self, prev_pos, curr_pos):
if prev_pos[0] < curr_pos[0]:
return 'West'
elif prev_pos[0] > curr_pos[0]:
return 'East'
else:
if prev_pos[1] < curr_pos[1]:
return 'North'
elif prev_pos[1] > curr_pos[1]:
return 'South'
else:
return 'Stop'
class OffDQNAgent(DQNAgent):
def getReward(self, new_gs, old_gs):
# init
new_agent = new_gs.getAgentState(self.index)
old_agent = old_gs.getAgentState(self.index)
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
# op_pos = [new_gs.getAgentPosition(i)
# for i in self.getOpponents(new_gs)]
food_pos = []
for i, r in enumerate(self.getFood(old_gs).data):
for j, c in enumerate(r):
if self.getFood(old_gs).data[i][j]:
food_pos += [(i, j)]
reward = 0
# Move closer to food
reward += 20.0 * (self.min_dist_to_food(old_gs, old_loc) -
self.min_dist_to_food(old_gs, new_loc)) / float(old_agent.numCarrying + 1) - 3.0
# No movement
if old_loc == new_loc:
reward -= 4.0
# Close to Food
reward += (50.0 - self.min_dist_to_food(old_gs, new_loc)) / 10.0
# Holding too many
reward -= new_agent.numCarrying * 1.5
# pick up dot
r, c = new_loc
if self.getFood(old_gs).data[r][c]:
reward += 50.0
# return dots to side
reward += 200.0 * (new_agent.numReturned - old_agent.numReturned)
# died
if self.isAgentDead(new_gs, old_gs):
reward -= 500.0
# close to op
if new_agent.isPacman:
            old_distances = min(
                old_gs.agentDistances[i] for i in self.getOpponents(old_gs))
            new_distances = min(
                new_gs.agentDistances[i] for i in self.getOpponents(new_gs))
if new_distances < 4:
reward -= (5 - new_distances) * 20.0
with open("off_rewards.json", "w") as write_file:
json.dump(reward, write_file)
return reward
class DefDQNAgent(DQNAgent):
def getReward(self, new_gs, old_gs):
# init
new_agent = new_gs.getAgentState(self.index)
old_agent = old_gs.getAgentState(self.index)
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
# op_pos = [old_gs.getAgentPosition(i)
# for i in self.getOpponents(old_gs)]
op_indices = self.getOpponents(old_gs)
reward = 0
# if not (new_agent.isPacman):
# min_dist_to_op = self.min_dist_to_op(old_gs, new_loc)
# reward = float(50) - min_dist_to_op
# if(min_dist_to_op == 0):
# reward += 200
if new_agent.isPacman:
reward -= 50
# living penalty while on defensive side -> reward = -.03
# if not (new_agent.isPacman):
# reward -= .03
# # capture opponent -> 20
# min_dist_to_op = self.min_dist_to_op(old_gs, new_loc)
# if(min_dist_to_op == 0):
# reward += 20
# # Opponent far -> -1 Opponent close -> 1
# else:
# reward += math.abs(min_dist_to_op / float(50) - 1)
# living penalty while on offensive side -> reward = -.05
# else:
# reward -= .05
# # died -> -50
# if self.isDead(new_gs, old_gs):
# reward -= 50
# # if opponent returns dots -> reward = -3 * num returned
# old_num_returned = [old_gs.getAgentState(i).numReturned
# for i in op_indices]
# new_num_returned = [new_gs.getAgentState(i).numReturned
# for i in op_indices]
# reward -= 3 * (sum(new_num_returned) - sum(old_num_returned))
with open("def_rewards.json", "w") as write_file:
json.dump(reward, write_file)
return reward
| 34.256236 | 112 | 0.583769 | 12,912 | 0.854703 | 0 | 0 | 0 | 0 | 0 | 0 | 4,814 | 0.31866 |
6f5beb9a275be58bdbdf0da45a8be956e56f7cc6 | 850 | py | Python | wordBreak2.py | saai/LeetcodePythonSolutions | 201f2054dda3f303ae6a376b40cbc7f98688322c | [
"MIT"
]
| null | null | null | wordBreak2.py | saai/LeetcodePythonSolutions | 201f2054dda3f303ae6a376b40cbc7f98688322c | [
"MIT"
]
| null | null | null | wordBreak2.py | saai/LeetcodePythonSolutions | 201f2054dda3f303ae6a376b40cbc7f98688322c | [
"MIT"
]
| null | null | null | class Solution:
# @param s, a string
# @param wordDict, a set<string>
# @return a string[]
def wordBreak(self, s, wordDict):
n = len(s)
res = []
chars = ''.join(wordDict)
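        # Quick pruning: every character of s must occur in some dictionary word.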
for i in xrange(n):
if s[i] not in chars:
return res
lw = s[-1]
lw_end = False
for word in wordDict:
if word[-1] == lw:
lw_end = True
if not lw_end:
return res
self.dfs(s,[],wordDict,res)
return res
def dfs(self, s, path,wordDict,res):
if not s:
res.append(' '.join(path[:]))
return
for i in range(1,len(s)+1):
c = s[:i]
if c in wordDict:
path.append(c)
self.dfs(s[i:],path,wordDict,res)
path.pop() | 28.333333 | 49 | 0.443529 | 850 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.090588 |
6f5c96a2170db005f0df74623642b0c6df9f9c2a | 433 | py | Python | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
]
| 3 | 2021-12-11T20:57:07.000Z | 2021-12-14T22:20:42.000Z | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
]
| 20 | 2021-11-15T17:08:54.000Z | 2022-03-25T10:32:52.000Z | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
]
| 3 | 2021-11-22T21:44:04.000Z | 2021-12-14T10:31:46.000Z | from setuptools import setup, find_packages
setup(
name='SBIExperiments',
version='0.0.1',
url='https://github.com/astrodeepnet/sbi_experiments',
author='Justine Zeghal and friends',
description='Package for numerical experiments of SBI tools',
packages=find_packages(),
install_requires=[
'numpy>=1.19.2',
'jax>=0.2.0',
'tensorflow_probability>=0.14.1',
'scikit-learn>=0.21',
'jaxopt>=0.2'
],
)
| 24.055556 | 63 | 0.681293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.554273 |
6f5dc8fddaaa9b918695e316e6b45bac16a19712 | 880 | py | Python | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
]
| null | null | null | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
]
| null | null | null | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
]
| 1 | 2018-09-14T08:32:07.000Z | 2018-09-14T08:32:07.000Z | import datetime
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from pprint import pprint
Engine = get_driver(Provider.ELASTICHOSTS)
driver = Engine("733b7dc7-7498-4db4-9dc4-74d3fee8abed",
secret="6w6CDAqL6JyXFj3xNkWW2zpUjYfv9dYaLVdaaR4Y",
secure=False)
images = driver.list_images()
sizes = driver.list_sizes()
IMAGE_ID = '38df09864d854b76b5023878ffc80161'
image = [i for i in images if i.id == IMAGE_ID][0]
pprint(images)
pprint(sizes)
node = driver.deploy_node(
name="astricon-{}".format(datetime.datetime.now().strftime('%Y-%m-%dt%H%M%S')),
image=image,
size=sizes[3],
script='deploy-script.sh',
enable_root=True,
vnc_password="myStr0ngr00tpa55wo7d")
print("Waiting for Node")
driver.wait_until_running([node], 10, 1000)
print("Node is now running")
| 25.882353 | 83 | 0.729545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.253409 |
6f5df725ff569b1c32118a15233cd3613598d3f9 | 95 | py | Python | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
]
| null | null | null | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
]
| null | null | null | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
]
| null | null | null | from django.contrib import admin
from .models import TodoModel
admin.site.register(TodoModel)
| 19 | 32 | 0.831579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6f60ca83d2a6e347812ac821a5981278c5493d55 | 1,853 | py | Python | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
]
| null | null | null | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
]
| null | null | null | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
"""
Author : antoniog1
Date : 2019-02-21
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('positional', metavar='DIR', type = str, help='A positional argument', nargs="+")
parser.add_argument('-w', '--width', help='A named integer argument', metavar='int', type=int, default=50)
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
width = args.width
directory = args.positional
for dir_name in directory:
dir_dict = {}
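        # Map each file's first line to its filename so output sorts by first line.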
if not os.path.isdir(dir_name):
warn('"{}" is not a directory'.format(dir_name))
continue
print(dir_name)
for filename in os.listdir(dir_name):
path = os.path.join(dir_name,filename)
with open(path) as f:
first_line = f.readline().rstrip()
dir_dict[first_line] = filename
for line, file in sorted(dir_dict.items()):
num_per = width - len(line) - len(file)
ellipses = "." * num_per
print('{} {} {}'.format(line,ellipses,file))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 28.953125 | 110 | 0.51538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 659 | 0.35564 |
6f60f993f98cc6ec2f6e673c50ecaa903ed57a90 | 4,042 | py | Python | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
]
| null | null | null | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
]
| null | null | null | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# @Author: José Sánchez-Gallego
# @Date: Oct 10, 2017
# @Filename: target.py
# @License: BSD 3-Clause
# @Copyright: José Sánchez-Gallego
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import pathlib
import yaml
from . import regions
from .. import config
class Target(object):
"""A representation of an astronomical target.
Defines a target, including target centre, area on the sky, surface
brightnes, etc. See the section :ref:`target-defining` for more
information.
Parameters:
name (str):
The identifier of this target, e.g., ``'M81'``.
coords (tuple or `~astropy.coordinates.SkyCoord`):
A tuple of ``(ra, dec)`` in degrees or a
`~astropy.coordinates.SkyCoord` describing the centre of the
target. If the region is of type ``polygon``, ``coords`` must
be a list of vertices as indicated in `~.regions.PolygonalRegion`.
region_type (str):
One of the valid region types for `~.regions.Region`.
region_params (dict):
A dictionary of parameters to be passed to `~.regions.Region`.
Example:
>>> target = Target('MyTarget', coords=(169, 65), region_type='circle',
>>> region_params={'r': 0.1})
>>> target
<Region 'MyTarget'>
>>> target.region
<CircularRegion (coords=<SkyCoord (ICRS): (ra, dec) in deg
( 169., 65.)>, r=0.100 deg)>
"""
def __init__(self, name, coords, region_type, region_params={}):
self.name = name
self.coords = coords
self.region = self._create_region(coords, region_type, region_params)
def __repr__(self):
return f'<Region {self.name!r}>'
@staticmethod
def _create_region(coords, region_type, region_params):
"""Returns a `.regions.Region` with the target on the sky."""
return regions.Region(region_type, coords, **region_params)
@classmethod
def from_target_list(cls, name, target_list=None):
"""Returns an instance of `.Target` from a target list.
Initialises a new target whose parameters have been previously defined
in a target list. Target lists must be YAML files in which each
target has attributes ``coords``, ``region_params``, and
``region_params``, defined as in :ref:`target-defining`. For example:
.. code-block:: yaml
M81:
coords: [148.888333, 69.0652778]
region_type: 'ellipse'
region_params:
a: 0.209722
b: 0.106958333
pa: 149
Parameters:
name (str):
The identifier for the target. Must be defined in the target
list file.
target_list (str, `~pathlib.Path`, or None):
The path to the YAML file containing the target list. If
``None``, default to the target list contained in ``lvmcore``.
Example:
>>> from lvmsurveysim.target import Target
>>> m81 = Target.from_target_list('M81')
"""
if target_list is None:
target_list = pathlib.Path(
os.path.expanduser(os.path.expandvars(config['target_list'])))
else:
target_list = pathlib.Path(target_list)
assert target_list.exists()
targets = yaml.load(open(str(target_list)))
assert name in targets, 'target not found in target list.'
target = targets[name]
return cls(name, target['coords'], region_type=target['region_type'],
region_params=target['region_params'])
def plot(self, **kwargs):
"""Plots the target.
Parameters:
kwargs (dict):
Keyword arguments to be pased to `.regions.Region.plot`.
"""
return self.region.plot(**kwargs)
| 30.854962 | 79 | 0.596487 | 3,666 | 0.90608 | 0 | 0 | 1,942 | 0.47998 | 0 | 0 | 2,786 | 0.688581 |
6f628605ce1d839a711154a74d7ae1743fe66d28 | 1,889 | py | Python | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
]
| 4 | 2021-08-15T04:55:44.000Z | 2022-02-01T09:19:57.000Z | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
]
| 53 | 2021-07-16T11:02:44.000Z | 2022-03-07T16:39:20.000Z | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
]
| 5 | 2021-07-15T18:17:53.000Z | 2022-01-29T08:09:16.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import google.auth
from google.cloud import bigquery
import pytest
import export_to_bigquery
GCLOUD_TESTS_PREFIX = "python_samples_tests"
@pytest.fixture
def project_id():
_, project_id = google.auth.default()
return project_id
@pytest.fixture
def unique_id():
uuid_hex = uuid.uuid4().hex[:8]
return f"{GCLOUD_TESTS_PREFIX}_{uuid_hex}"
@pytest.fixture
def bigquery_resources(project_id, unique_id):
# Create a BigQuery dataset.
bigquery_client = bigquery.Client()
dataset_id = unique_id
table_id = unique_id
dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
dataset.location = "US"
bigquery_client.create_dataset(dataset, timeout=30)
# Create a BigQuery table under the created dataset.
table = bigquery.Table(f"{project_id}.{dataset_id}.{table_id}")
bigquery_client.create_table(table)
yield dataset_id, table_id
# Delete the BigQuery dataset and table.
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
def test_export_data_to_bigquery(capsys, project_id, bigquery_resources):
dataset_id, table_id = bigquery_resources
export_to_bigquery.export_to_bigquery(project_id, project_id, dataset_id, table_id)
out, err = capsys.readouterr()
assert "Exported data to BigQuery" in out
| 28.621212 | 87 | 0.755956 | 0 | 0 | 630 | 0.33351 | 858 | 0.454209 | 0 | 0 | 837 | 0.443092 |