Dataset schema (one record per source file):

| column | dtype | values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 972 |
| max_stars_repo_name | stringlengths | 6 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24, nullable |
| max_issues_repo_path | stringlengths | 3 to 972 |
| max_issues_repo_name | stringlengths | 6 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24, nullable |
| max_forks_repo_path | stringlengths | 3 to 972 |
| max_forks_repo_name | stringlengths | 6 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24, nullable |
| content | stringlengths | 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
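The last three columns (avg_line_length, max_line_length, alphanum_fraction) are statistics derived from the content field. As a minimal sketch of what they mean, assuming the obvious definitions (lines split on newlines, alphanumeric fraction taken over all characters of content), they could be recomputed like this; the sketch is illustrative only and is not part of the dataset's own tooling:

```python
# Minimal sketch: recompute the derived statistics for one record's `content`.
# Assumes avg/max line length are taken over newline-split lines and that
# alphanum_fraction is the share of alphanumeric characters in the string.
def content_stats(content: str) -> dict:
    lines = content.split("\n")
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }


if __name__ == "__main__":
    sample = "import os\n\nprint(os.name)\n"
    print(content_stats(sample))  # prints the three derived statistics for the sample string
```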
hexsha: c2d56eb3fc63f344838e0246da51b41752555a49 | size: 3,265 | ext: py | lang: Python
max_*_repo_path: sdk/media/azure-mgmt-media/azure/mgmt/media/_configuration.py | max_*_repo_name: ankitarorabit/azure-sdk-for-python | max_*_repo_head_hexsha: dd90281cbad9400f8080754a5ef2f56791a5a88f | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: 2 (min 2021-03-24T06:26:11.000Z, max 2021-04-18T15:55:59.000Z) | max_issues_count: 4 (min 2019-04-17T17:57:49.000Z, max 2020-04-24T21:11:22.000Z) | max_forks_count: 1 (min 2019-04-05T18:17:43.000Z, max 2019-04-05T18:17:43.000Z)
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class AzureMediaServicesConfiguration(Configuration):
"""Configuration for AzureMediaServices.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The unique identifier for a Microsoft Azure subscription.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(AzureMediaServicesConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-media/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
avg_line_length: 45.985915 | max_line_length: 129 | alphanum_fraction: 0.684533

hexsha: 0fcc29cf778b4ef4a34a2134d052db01da605f23 | size: 1,897 | ext: py | lang: Python
max_*_repo_path: Jann/interact_with_model.py | max_*_repo_name: daghan/jann | max_*_repo_head_hexsha: 1ce6a74a99313038e5823e760395f9c303ea5b5f | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null (no star/issue/fork event datetimes)
content:
import sys
import tensorflow as tf
import utils
def main(arguments):
"""Main run function for interacting with the model."""
# Parse the arguments
args = utils.parse_arguments(arguments)
tf.logging.info('Loading unique strings.')
data_path = args.infile
unique_strings_path = data_path + '.embedded.pkl_unique_strings.csv'
# load the unique lines
with open(unique_strings_path) as f:
unique_strings = [line.rstrip() for line in f]
  tf.logging.info('Loaded {} unique strings'.format(len(unique_strings)))
# define the path of the nearest neighbor model to use
annoy_index_path = data_path + '.ann'
# Load generative models from pickles to generate from scratch.
try:
tf.logging.info('Build generative model...')
gen_model_use = utils.GenModelUSE(
annoy_index_path=annoy_index_path,
unique_strings=unique_strings,
module_path=args.module_path,
use_sentence_piece=args.use_sentence_piece
)
tf.logging.info('Generative model built.')
except (OSError, IOError) as e:
tf.logging.error(e)
tf.logging.info('Error building generative model.')
# build a loop for interactive mode
while True:
# get user input
user_input = input('\nQuery Text: ')
# if user input is too short
if len(user_input) < 1:
continue
nns, distances = gen_model_use.inference(
user_input,
num_neighbors=args.num_neighbors,
use_sentence_piece=args.use_sentence_piece)
# print all the returned responses, and distance to input
for nn, distance in zip(nns, distances):
print('d: {}, {}'.format(
distance,
unique_strings[nn].split(args.delimiter)))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
avg_line_length: 31.098361 | max_line_length: 76 | alphanum_fraction: 0.643121

hexsha: db1b95c0770190f8dd5656be8a6271001d0d00b4 | size: 894 | ext: py | lang: Python
max_*_repo_path: installment/urls.py | max_*_repo_name: erpstudio/zki | max_*_repo_head_hexsha: 7dcc33a7286ea375d121f158b1d23706f81f5d6a | max_*_repo_licenses: ["Apache-2.0"] (identical across stars/issues/forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null (no star/issue/fork event datetimes)
content:
from django.urls import path
from . import saleentry_view, installment_view, report_view
urlpatterns = [
# Reports
path('installments/report', report_view.index, name='installment.report.index'),
# Installments
path('installments/today', installment_view.today, name='installment.today'),
path('installments/pending', installment_view.pending, name='installment.pending'),
path('installments/payment/<int:id>/', installment_view.payment, name='installment.payment'),
path('installments/updateStatustoPaid/', installment_view.updateStatustoPaid, name='installment.updateStatustoPaid'),
# Purchase Entry
path('sale/entry/add', saleentry_view.add, name='sale.entry.add'),
path('sale/entry/list', saleentry_view.list, name='sale.entry.list'),
path('sale/entry/<int:id>/', saleentry_view.show, name='sale.entry.show'),
path('sale/entry/save', saleentry_view.save, name='sale.entry.save'),
]
avg_line_length: 40.636364 | max_line_length: 117 | alphanum_fraction: 0.774049

hexsha: b292aca1b7666158ed8de89e8822c630af852ead | size: 20,299 | ext: py | lang: Python
max_*_repo_path: tests/system/python/packages/test_opcua.py | max_*_repo_name: DDC-NDRS/fledge-iot_fledge | max_*_repo_head_hexsha: 27a5e66a55daaab1aca14ce6e66f9f1e6efaef51 | max_*_repo_licenses: ["Apache-2.0"] (identical across stars/issues/forks)
max_stars_count: 1 (min 2020-05-04T12:23:42.000Z, max 2020-05-04T12:23:42.000Z) | max_issues_count: null | max_forks_count: null
content:
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
""" Test OPCUA System tests:
* Prerequisite:
a) On First instance
- Install fledge-south-opcua and fledge-south-s2opcua
- Install fledge-north-opcua
        - Use Prosys OPCUA simulator with a set of simulated data covering all supported data types
- Use Prosys OPCUA client to connect north opcua server and then browse around the objects
that Fledge is creating And those subscriptions to second fledge instance
Download:
Prosys OPCUA client from https://downloads.prosysopc.com/opc-ua-client-downloads.php
Prosys OPCUA server from https://downloads.prosysopc.com/opc-ua-simulation-server-downloads.php
b) On Second instance (manual process)
- Install fledge, fledge-south-opcua packages And Make sure Fledge is in running mode with reset data
* Test:
- Add south service with opcua/s2opcua plugin
- Create data points with supported types from simulator
        - Verify the readings data is correct via the asset API
        - Add north service with opcua plugin
        - Publish data to north-opcua and use another Fledge instance to read the data, then compare the data
        between the two instances
"""
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2021 Dianomic Systems, Inc."
import subprocess
import time
import utils
import pytest
import platform
import urllib.parse
from typing import Tuple
""" First FL instance IP Address """
FL1_INSTANCE_IP = "192.168.1.8"
""" Second FL instance IP Address """
FL2_INSTANCE_IP = "192.168.1.7"
""" Packages list for FL instances """
PKG_LIST = "fledge-south-opcua fledge-south-s2opcua fledge-north-opcua"
""" opcua south plugin name """
OPCUA_SOUTH_PLUGIN_NAME = "opcua"
""" s2opcua south plugin name """
S2OPCUA_SOUTH_PLUGIN_NAME = "s2opcua"
""" Service name with opcua south plugin """
OPCUA_SOUTH_SVC_NAME = "OPCUA #1"
""" Service name with s2opcua south plugin """
S2OPCUA_SOUTH_SVC_NAME = "S2 OPC-UA"
""" opcua north plugin name """
OPCUA_NORTH_PLUGIN_NAME = "opcua"
""" Service name with opcua north plugin """
OPCUA_NORTH_SVC_NAME = "OPCUA"
""" opcua readings asset count as configured in Prosys simulation server """
OPCUA_ASSET_COUNT = 12
""" s2opcua readings asset count as configured in Prosys simulation server """
S2OPCUA_ASSET_COUNT = 12
""" Asset prefix for opcua south plugin """
OPCUA_ASSET_NAME = "opcua"
""" Asset prefix for s2opcua south plugin """
S2OPCUA_ASSET_NAME = "s2opcua"
""" Server URL used in south opcua and s2opcua plugin configuration to get readings data """
OPCUA_SERVER_URL = "opc.tcp://{}:53530/OPCUA/SimulationServer".format(FL1_INSTANCE_IP)
""" Server URL used in north opcua plugin configuration for to pull the data """
OPCUA_NORTH_SERVER_URL = "opc.tcp://{}:4840/fledge/server".format(FL1_INSTANCE_IP)
""" Supported data types lists and in tuple format (data type, node identifier, data value)
as given Prosys Simulation settings. NOTE: Create in an order and node display name as is """
SUPPORTED_DATA_TYPES = [("Boolean", 1008, 0), ("SByte", 1009, -128), ("Byte", 1010, 128), ("Int16", 1011, -32768),
("UInt16", 1012, 65535), ("Int32", 1013, -2147483648), ("UInt32", 1014, 4294967295),
("Int64", 1015, -9223372036854775808), ("UInt64", 1016, 18446744073709551615),
("Float", 1017, -3.4E38), ("Double", 1018, 1.7E308), ("String", 1019, "0.0")]
""" Subscription plugin configuration used for both opcua and s2opcua south plugins """
SUBSCRIPTION = ["ns=3;i={}".format(SUPPORTED_DATA_TYPES[0][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[1][1]),
"ns=3;i={}".format(SUPPORTED_DATA_TYPES[2][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[3][1]),
"ns=3;i={}".format(SUPPORTED_DATA_TYPES[4][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[5][1]),
"ns=3;i={}".format(SUPPORTED_DATA_TYPES[6][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[7][1]),
"ns=3;i={}".format(SUPPORTED_DATA_TYPES[8][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[9][1]),
"ns=3;i={}".format(SUPPORTED_DATA_TYPES[10][1]), "ns=3;i={}".format(SUPPORTED_DATA_TYPES[11][1])
]
""" OPCUA objects which will be used when we pull the data from north opcua to second FL instance (FL2_INSTANCE_IP) """
OPCUA_OBJECTS = [("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2001, "false"),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2002, -128),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2003, 128),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2004, -32768),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2005, 65535),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2006, -2147483648),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2007, 4294967295),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2008, -9223372036854775808),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2009, 18446744073709551615),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2010, -3.4E38),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2011, 1.7E308),
("{}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2012, "0.0"),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2013, 0),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2014, -128),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2015, 128),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2016, -32768),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2017, 65535),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2018, -2147483648),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2019, 4294967295),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2020, -9223372036854775808),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2021, 18446744073709551615),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2022, -3.4E38),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2023, 1.7E308),
("{}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]), 2024, "0.0")
]
""" Subscription plugin configuration used for north opcua plugin """
SUBSCRIPTION2 = ["ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[1][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[2][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[3][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[4][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[5][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[6][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[7][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[8][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[9][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[10][1]),
"ns=2;s={}{}".format(OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[11][1]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[0][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[1][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[2][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[3][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[4][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[5][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[6][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[7][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[8][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[9][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[10][0]),
"ns=2;s={}{}".format(S2OPCUA_ASSET_NAME, SUPPORTED_DATA_TYPES[11][0])
]
@pytest.fixture
def install_pkg():
""" Fixture used for to install packages and only used in First FL instance """
try:
os_platform = platform.platform()
pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt'
subprocess.run(["sudo {} install -y {}".format(pkg_mgr, PKG_LIST)], shell=True, check=True)
except subprocess.CalledProcessError:
assert False, "{} one of installation package failed".format(PKG_LIST)
def add_service(fledge_url: str, wait_time: int, name: str, _type: str, plugin: str, config: dict) -> None:
""" Used to add any service """
data = {
"name": name,
"type": _type,
"plugin": plugin,
"enabled": "true",
"config": config
}
utils.post_request(fledge_url, "/fledge/service", data)
# extra wait time needed
time.sleep(2 * wait_time)
def get_ping_data(fledge_url: str, key: str, asset_count: int) -> Tuple[int, str]:
""" Used to get ping info """
ping_info = utils.get_request(fledge_url, "/fledge/ping")
assert key in ping_info
total_read = asset_count
    # Special handling is required when both plugins run
if ping_info[key] > asset_count:
total_read = OPCUA_ASSET_COUNT + S2OPCUA_ASSET_COUNT
return total_read, ping_info[key]
def get_asset_readings(fledge_url: str, asset_prefix: str, plugin_name: str, data: list) -> None:
""" Used to get asset readings for an asset code """
for obj in data:
asset_suffix = str(obj[1]) if plugin_name == OPCUA_SOUTH_PLUGIN_NAME else obj[0]
asset_name = "{}{}".format(asset_prefix, asset_suffix)
jdoc = utils.get_request(fledge_url, "/fledge/asset/{}".format(asset_name))
if jdoc:
result = jdoc[0]['reading'][str(asset_suffix)]
print("Asset Reading Jdoc: {} \nExpected:{} == Actual:{}".format(jdoc[0]['reading'], obj[2], result))
# TODO: FOGL-6076 - readings mismatched for some data types
if asset_suffix not in ("SByte", "Byte", "Int16", "Int32", "UInt64", "1016", "Float", "1017",
"Double", "1018", "2007", "2008", "2009", "2010", "2011", "2014",
"2016", "2019", "2020", "2021", "2022", "2023"):
# For opcua plugin it is treated as false Not 0
if asset_suffix == "1008":
assert "false" == result
else:
assert obj[2] == result
else:
print("Verification skipped for an asset: {}; Due to Bug exists. See FOGL-6076".format(asset_name))
else:
print("Reading not found for an asset code: {}".format(asset_name))
def verify_service(fledge_url: str, retries: int, svc_name: str, plugin_name: str, _type: str,
asset_count: int) -> None:
""" Used for verification of any service"""
get_url = "/fledge/south" if _type == "Southbound" else "/fledge/north"
while retries:
result = utils.get_request(fledge_url, get_url)
if _type == "Southbound":
if len(result["services"]):
svc_info = [s for s in result["services"] if s['name'] == svc_name]
if 'status' in svc_info[0] and svc_info[0]['status'] != "":
assert svc_name == svc_info[0]['name']
assert 'running' == svc_info[0]['status']
assert plugin_name == svc_info[0]['plugin']['name']
assert asset_count == len(svc_info[0]['assets'])
break
else:
if len(result):
svc_info = [s for s in result if s['name'] == svc_name]
if 'status' in svc_info[0] and svc_info[0]['status'] != "":
assert svc_name == svc_info[0]['name']
assert 'north_C' == svc_info[0]['processName']
assert 'running' == svc_info[0]['status']
# assert total_read == svc_info[0]['sent']
assert OPCUA_NORTH_PLUGIN_NAME == svc_info[0]['plugin']['name']
break
retries -= 1
if retries == 0:
assert False, "TIMEOUT! Data NOT seen for {} with endpoint {}".format(svc_name, get_url)
def verify_asset_and_readings(fledge_url: str, total_assets: int, asset_name: str, plugin_name: str,
data: list) -> None:
""" Used for verification of assets and readings """
result = utils.get_request(fledge_url, "/fledge/asset")
assert len(result), "No asset found"
assert total_assets == len(result)
get_asset_readings(fledge_url, asset_name, plugin_name, data)
def verify_asset_tracking_details(fledge_url: str, total_assets: int, svc_name: str, asset_name_prefix: str,
plugin_name: str, event: str, data: list) -> None:
""" Used for verification of asset tracker details """
tracking_details = utils.get_request(fledge_url, urllib.parse.quote("/fledge/track?service={}&event={}".format(
svc_name, event), safe='?,=,&,/'))
assert len(tracking_details["track"]), "Failed to track Ingest event"
assert total_assets == len(tracking_details["track"])
for record in tracking_details['track']:
for idx, val in enumerate(data):
asset = "{}{}".format(asset_name_prefix, val[0]) if plugin_name == S2OPCUA_SOUTH_PLUGIN_NAME else \
"{}{}".format(asset_name_prefix, str(val[1]))
if asset in record['asset']:
print("Asset Tracking JDoc: {} \nExpected:{} == Actual:{}".format(record, asset, record['asset']))
assert asset == record['asset']
assert event == record['event']
assert plugin_name == record['plugin']
assert svc_name == record['service']
break
class TestSouthOPCUA:
""" To test south opcua plugins """
    # NOTE: The test below can be marked as skip if already executed, as it only needs to run once on an instance
# @pytest.mark.skip(reason="Already installed the packages")
def test_clean_install(self, clean_setup_fledge_packages, install_pkg):
pass
def test_setup(self, reset_and_start_fledge):
pass
@pytest.mark.parametrize("plugin_name, svc_name, asset_name, asset_count", [
(OPCUA_SOUTH_PLUGIN_NAME, OPCUA_SOUTH_SVC_NAME, OPCUA_ASSET_NAME, OPCUA_ASSET_COUNT),
(S2OPCUA_SOUTH_PLUGIN_NAME, S2OPCUA_SOUTH_SVC_NAME, S2OPCUA_ASSET_NAME, S2OPCUA_ASSET_COUNT)
])
def test_asset_readings_and_tracker_entry(self, fledge_url, retries, wait_time, plugin_name, svc_name, asset_name,
asset_count):
print("a) Adding {} south service...".format(svc_name))
config = {
"asset": {
"value": asset_name
},
"url": {
"value": OPCUA_SERVER_URL
},
"subscription": {
"value": {
"subscriptions": SUBSCRIPTION
}
}
}
add_service(fledge_url, wait_time, svc_name, "south", plugin_name, config)
print("b) Verifying {} south service and its details...".format(svc_name))
verify_service(fledge_url, retries, svc_name, plugin_name, "Southbound", asset_count)
print("c) Verifying data read in ping...")
total_read_count, data_read = get_ping_data(fledge_url, "dataRead", asset_count)
assert total_read_count == data_read
print("d) Verifying assets and readings...")
verify_asset_and_readings(fledge_url, total_read_count, asset_name, plugin_name, SUPPORTED_DATA_TYPES)
print("e) Verifying Ingest asset tracker entry...")
verify_asset_tracking_details(fledge_url, asset_count, svc_name, asset_name, plugin_name, "Ingest",
SUPPORTED_DATA_TYPES)
class TestNorthOPCUA:
""" To test north opcua plugin """
@pytest.mark.parametrize("asset_name, asset_count,", [
(OPCUA_ASSET_NAME, OPCUA_ASSET_COUNT),
(S2OPCUA_ASSET_NAME, S2OPCUA_ASSET_COUNT)
])
def test_service_and_sent_readings(self, fledge_url, retries, wait_time, asset_name, asset_count):
get_north_svc = utils.get_request(fledge_url, "/fledge/north")
if not get_north_svc:
print("a) Adding {} north service...".format(OPCUA_NORTH_PLUGIN_NAME))
config = {
"url":
{
"value": OPCUA_NORTH_SERVER_URL
}
}
add_service(fledge_url, wait_time, OPCUA_NORTH_SVC_NAME, "north", OPCUA_NORTH_PLUGIN_NAME, config)
print("b) Verifying {} north service and its details...".format(OPCUA_NORTH_SVC_NAME))
verify_service(fledge_url, retries, OPCUA_NORTH_SVC_NAME, OPCUA_NORTH_PLUGIN_NAME, "Northbound", asset_count)
print("c) Verifying data sent in ping...")
total_read_count, data_read = get_ping_data(fledge_url, "dataSent", asset_count)
assert total_read_count == data_read
print("d) Verifying Egress asset tracker entry...")
verify_asset_tracking_details(fledge_url, total_read_count, OPCUA_NORTH_SVC_NAME, asset_name,
OPCUA_NORTH_PLUGIN_NAME, "Egress", SUPPORTED_DATA_TYPES)
class TestPublishNorthOPCUA:
""" Publish the readings data to north using the fledge-north-opcua and use another FL instance to read the data.
Comparison of readings data between two FL instances to confirm data is correctly transmitted.
"""
def test_data_to_another_fl_instance(self, wait_time, retries):
rest_api_url = "{}:8081".format(FL2_INSTANCE_IP)
asset_count = OPCUA_ASSET_COUNT + S2OPCUA_ASSET_COUNT
print("Verifying publishing of data to another {} FL instance".format(FL2_INSTANCE_IP))
print("a) Adding {} south service...".format(OPCUA_SOUTH_SVC_NAME))
config = {
"asset": {
"value": OPCUA_ASSET_NAME
},
"url": {
"value": OPCUA_NORTH_SERVER_URL
},
"subscription": {
"value": {
"subscriptions": SUBSCRIPTION2
}
}
}
add_service(rest_api_url, wait_time, OPCUA_SOUTH_SVC_NAME, "south",
OPCUA_SOUTH_PLUGIN_NAME, config)
print("b) Verifying {} south service and its details...".format(OPCUA_SOUTH_SVC_NAME))
verify_service(rest_api_url, retries, OPCUA_SOUTH_SVC_NAME, OPCUA_SOUTH_PLUGIN_NAME, "Southbound", asset_count)
print("c) Verifying data read in ping...")
total_read_count, data_read = get_ping_data(rest_api_url, "dataRead", asset_count)
assert total_read_count == data_read
print("d) Verifying assets and readings...")
verify_asset_and_readings(rest_api_url, total_read_count, OPCUA_ASSET_NAME, OPCUA_SOUTH_PLUGIN_NAME,
OPCUA_OBJECTS)
print("e) Verifying Ingest asset tracker entry...")
verify_asset_tracking_details(rest_api_url, asset_count, OPCUA_SOUTH_SVC_NAME, OPCUA_ASSET_NAME,
OPCUA_SOUTH_PLUGIN_NAME, "Ingest", OPCUA_OBJECTS)
avg_line_length: 55.613699 | max_line_length: 119 | alphanum_fraction: 0.621508

hexsha: 170776e34a989ba378fa9e9f60aa84575cee2b0b | size: 15,120 | ext: py | lang: Python
max_*_repo_path: colorme/colorme.py | max_*_repo_name: sourcery-ai-bot/FlapJack-Cogs | max_*_repo_head_hexsha: 9d16bcaa6238f481298b07df2a5ff92a53d73108 | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null (no star/issue/fork event datetimes)
content:
import asyncio
import re
import webcolors
import discord
from redbot.core import Config, checks, commands
class ColorMe(commands.Cog):
"""Manage the color of your own name."""
default_guild_settings = {
"protected_roles": []
}
def __init__(self, bot: commands.Bot):
self.bot = bot
self.conf = Config.get_conf(self, identifier=879271957)
self.conf.register_guild(
**self.default_guild_settings
)
self.suffix = ":color"
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
@staticmethod
def _is_sharing_role(ctx: commands.Context, role):
guild = ctx.message.guild
return any(
(role in member.roles) and (member.id != ctx.message.author.id)
for member in guild.members
)
def _could_be_colorme(self, role):
pattern = re.compile(r'#\d{4}\Z')
if pattern.search(role.name) is not None:
# Possible role created by old version
return True
elif role.name.endswith(self.suffix):
return True
return False
@staticmethod
def _elim_valid_roles(roles):
        # Removing items from a list while iterating over it skips elements,
        # so build a filtered copy that keeps only roles with no members.
        return [role for role in roles if len(role.members) == 0]
@staticmethod
def _already_has_colorme(ctx: commands.Context, rolename):
guild = ctx.message.guild
return discord.utils.get(guild.roles, name=rolename)
@staticmethod
def _color_converter(hex_code_or_color_word: str):
"""
Used for user input on color
Input: discord.Color name, CSS3 color name, 0xFFFFFF, #FFFFFF, FFFFFF
Output: 0xFFFFFF
"""
# #FFFFFF and FFFFFF to 0xFFFFFF
hex_match = re.match(r"#?[a-f0-9]{6}", hex_code_or_color_word.lower())
if hex_match:
hex_code = f"0x{hex_code_or_color_word.lstrip('#')}"
return hex_code
# discord.Color checking
if hasattr(discord.Color, hex_code_or_color_word):
hex_code = str(getattr(discord.Color, hex_code_or_color_word)())
hex_code = hex_code.replace("#", "0x")
return hex_code
# CSS3 color name checking
try:
hex_code = webcolors.name_to_hex(hex_code_or_color_word, spec="css3")
hex_code = hex_code.replace("#", "0x")
return hex_code
except ValueError:
pass
return None
@commands.guild_only()
@commands.group(name="colorme")
async def colorme(self, ctx):
"""Change the color of your name via custom roles."""
pass
@colorme.command(name="change")
@commands.cooldown(10, 60, commands.BucketType.user)
async def _change_colorme(self, ctx: commands.Context, newcolor: str):
"""Change the color of your name.
`newcolor` must be a hex code like `#990000` or `990000`, a [Discord color name](https://discordpy.readthedocs.io/en/latest/api.html#colour),
or a [CSS3 color name](https://www.w3.org/TR/2018/REC-css-color-3-20180619/#svg-color).
"""
guild = ctx.message.guild
member = ctx.message.author
name = member.name
disc = member.discriminator
top_role = member.top_role
protected_roles = await self.conf.guild(guild).protected_roles()
# color decoding time
newcolor = newcolor.replace(" ", "_")
newcolor = self._color_converter(newcolor)
if not newcolor:
await ctx.send(
"Not a valid color code. Use a hex code like #990000, a "
"Discord color name or a CSS3 color name.\n"
"<https://discordpy.readthedocs.io/en/latest/api.html#colour>\n"
"<https://www.w3.org/TR/2018/REC-css-color-3-20180619/#svg-color>"
)
return
role_to_change = None
for role in member.roles:
if role.id in protected_roles:
return await ctx.send("You have a role that is protected from color changes.")
if self._could_be_colorme(role):
if role_to_change is not None:
return await ctx.send("It looks like you have more than "
"one role that can be used for "
"ColorMe, so I'm not sure which one "
"to edit. Talk to your server admin "
"about fixing this!")
role_to_change = role
if role_to_change is None:
rolename = f"{name}#{disc}{self.suffix}"
if self._already_has_colorme(ctx, rolename):
await ctx.send("It looks like the server already has "
"a ColorMe role for you, but it's not "
"applied to you. To be safe, I'm not "
"going to make a new one. Please talk "
"to your server admin about fixing this!")
return
# Make a new cosmetic role for this person
try:
new_role = await guild.create_role(reason='Custom ColorMe Role',
name=rolename,
colour=discord.Colour(int(newcolor, 16)),
hoist=False,
permissions=discord.Permissions.none())
except discord.Forbidden:
await ctx.send("Failed to create new role. (permissions)")
return
except discord.HTTPException:
await ctx.send("Failed to create new role. (request failed)")
return
try:
await member.add_roles(new_role, reason='Custom ColorMe Role')
except discord.Forbidden:
await ctx.send("Failed to apply new role. (permissions)")
return
except discord.HTTPException:
await ctx.send("Failed to apply new role. (request failed)")
return
# Change to reply?
await ctx.send("Your new color is set.")
else:
# Member appears to have an existing ColorMe role
# Need to make sure they are not sharing with someone else
if not self._is_sharing_role(ctx, role_to_change):
try:
await role_to_change.edit(colour=discord.Colour(int(newcolor, 16)), reason='ColorMe Change')
except discord.Forbidden:
return await ctx.send("Failed to edit role. (permissions)")
except discord.HTTPException:
return await ctx.send("Failed to edit role. (request failed)")
# Change to reply?
await ctx.send("Your new color is set.")
else:
# Change to reply?
await ctx.send("This is odd. It looks like you have a "
"valid ColorMe role, but you're sharing "
"it with one or more members. To be "
"safe, I'm not going to edit it.")
return
@colorme.command(name="clean")
@checks.admin_or_permissions(manage_guild=True)
async def _clean_colorme(self, ctx: commands.Context):
"""Clean colorme roles by removing all permissions."""
user = ctx.message.author
guild = ctx.message.guild
dirty_roles = []
emoji = ('\N{WHITE HEAVY CHECK MARK}', '\N{CROSS MARK}')
for role in guild.roles:
if self._could_be_colorme(role):
if role.permissions != discord.Permissions.none():
dirty_roles.append(role)
if not dirty_roles:
await ctx.send("I couldn't find any ColorMe roles "
"that need to be cleaned.")
return
msg_txt = ("I have scanned the list of roles on this server. "
"I have detected the following roles which were "
"**possibly** created by ColorMe, but still have "
"permissions attached to them. Would you like me to "
"remove all permissions from these roles? If you are "
"unsure, **please** cancel and manually verify the roles. "
"These roles could have been created by another person or "
"bot, and this action is not reversible.\n\n"
"{} **to confirm**\n"
"{} **to cancel**\n```".format(emoji[0], emoji[1]))
msg_txt += '\n'.join([role.name for role in dirty_roles]) + '```'
msg = await ctx.send(msg_txt)
await msg.add_reaction(emoji[0])
await asyncio.sleep(0.5)
await msg.add_reaction(emoji[1])
def check(r, u):
return r.message.id == msg.id and u == user
try:
(r, u) = await self.bot.wait_for('reaction_add', check=check, timeout=600)
except asyncio.TimeoutError:
r = None
if r is None or r.emoji == emoji[1]:
await msg.clear_reactions()
return
if r.emoji == emoji[0]:
await msg.clear_reactions()
await ctx.send("Cleaning roles...")
for role in dirty_roles:
await asyncio.sleep(1)
try:
await role.edit(permissions=discord.Permissions.none(),
reason='ColorMe permission wipe')
except discord.Forbidden:
await ctx.send(f"Failed to edit role: {role.name} (permissions)")
except discord.HTTPException:
await ctx.send(f"Failed to edit role: {role.name} (request failed)")
await ctx.send("Finished cleaning roles!")
@colorme.command(name="purge")
@checks.admin_or_permissions(manage_guild=True)
async def _purge_colorme(self, ctx: commands.Context):
"""Purge the server of roles that may have been created
by ColorMe, but are no longer in use."""
user = ctx.message.author
guild = ctx.message.guild
dead_roles = []
emoji = ('\N{WHITE HEAVY CHECK MARK}', '\N{CROSS MARK}')
for role in guild.roles:
if self._could_be_colorme(role):
dead_roles.append(role)
dead_roles = self._elim_valid_roles(dead_roles)
if not dead_roles:
return await ctx.send("I couldn't find any roles to purge.")
msg_txt = ("I have scanned the list of roles on this server. "
"I have detected the following roles which were "
"**possibly** created by ColorMe, but are not any "
"member's top_role, and are useless for setting color. "
"Would you like me to delete these roles? If you are "
"unsure, **please** cancel and manually verify the roles. "
"These roles could have been created by another person or "
"bot, and this action is not reversible.\n\n"
"{} **to confirm**\n"
"{} **to cancel**\n```".format(emoji[0], emoji[1]))
msg_txt += '\n'.join([role.name for role in dead_roles]) + '```'
msg = await ctx.send(msg_txt)
await msg.add_reaction(emoji[0])
await asyncio.sleep(0.5)
await msg.add_reaction(emoji[1])
def check(r, u):
return r.message.id == msg.id and u == user
try:
(r, u) = await self.bot.wait_for('reaction_add', check=check, timeout=600)
except asyncio.TimeoutError:
r = None
if r is None or r.emoji == emoji[1]:
return await msg.clear_reactions()
if r.emoji == emoji[0]:
await msg.clear_reactions()
await ctx.send("Deleting roles...")
for role in dead_roles:
await asyncio.sleep(1)
try:
await role.delete(reason='ColorMe role purge')
except discord.Forbidden:
await ctx.send(f"Failed to delete role: {role.name} (permissions)")
except discord.HTTPException:
await ctx.send(f"Failed to delete role: {role.name} (request failed)")
await ctx.send("Finished deleting roles!")
@colorme.command(name="protect")
@checks.admin_or_permissions(manage_guild=True)
async def _protect_colorme(self, ctx, role: str):
"""Add a role to the list of protected roles.
Members with this role as top role will not be allowed to change color.
Example: [p]colorme protect admin
"""
guild = ctx.message.guild
protect_role = discord.utils.get(guild.roles, name=role)
if protect_role is None:
return await ctx.send("No roles match that name.")
protected_roles = await self.conf.guild(guild).protected_roles()
if protect_role.id in protected_roles:
await ctx.send("That role is already protected.")
else:
protected_roles.append(protect_role.id)
await self.conf.guild(guild).protected_roles.set(protected_roles)
await ctx.send(f"Users with top role '{role}' are protected from color changes.")
@colorme.command(name="unprotect")
@checks.admin_or_permissions(manage_guild=True)
async def _unprotect_colorme(self, ctx, role: str):
"""Remove a role from the list of protected roles.
Example: [p]colorme unprotect admin
"""
guild = ctx.message.guild
protect_role = discord.utils.get(guild.roles, name=role)
if protect_role is None:
return await ctx.send("No roles match that name.")
protected_roles = await self.conf.guild(guild).protected_roles()
if protect_role.id not in protected_roles:
await ctx.send("That role is not currently protected.")
else:
protected_roles.remove(protect_role.id)
await self.conf.guild(guild).protected_roles.set(protected_roles)
await ctx.send(f"Users with top role '{role}' are no longer protected from color changes.")
@colorme.command(name="listprotect")
async def _listprotect_colorme(self, ctx):
"""Lists roles that are protected from color changes."""
guild = ctx.message.guild
protected_roles = await self.conf.guild(guild).protected_roles()
msg_text = "Protected role(s): "
if len(protected_roles) == 0:
msg_text += "None "
for role in protected_roles:
protected_role = discord.utils.get(guild.roles, id=role)
if protected_role is not None:
msg_text += " '" + protected_role.name + "',"
msg_text = msg_text[:-1] + "."
await ctx.send(msg_text)
avg_line_length: 42.47191 | max_line_length: 149 | alphanum_fraction: 0.564418

hexsha: c793c5231efb6fe812b658ff09ba0b1e8e0cc07b | size: 10,148 | ext: py | lang: Python
max_*_repo_path: google/ads/googleads/v9/services/services/video_service/transports/grpc.py | max_*_repo_name: JakobSteixner/google-ads-python | max_*_repo_head_hexsha: df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | max_*_repo_licenses: ["Apache-2.0"] (identical across stars/issues/forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null (no star/issue/fork event datetimes)
content:
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import video
from google.ads.googleads.v9.services.types import video_service
from .base import VideoServiceTransport, DEFAULT_CLIENT_INFO
class VideoServiceGrpcTransport(VideoServiceTransport):
"""gRPC backend transport for VideoService.
Service to manage videos.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_video(
self,
) -> Callable[[video_service.GetVideoRequest], video.Video]:
r"""Return a callable for the get video method over gRPC.
Returns the requested video in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetVideoRequest],
~.Video]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_video" not in self._stubs:
self._stubs["get_video"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.VideoService/GetVideo",
request_serializer=video_service.GetVideoRequest.serialize,
response_deserializer=video.Video.deserialize,
)
return self._stubs["get_video"]
__all__ = ("VideoServiceGrpcTransport",)
avg_line_length: 41.08502 | max_line_length: 87 | alphanum_fraction: 0.617363

hexsha: 374f88423e012e09b2943b98af78aaed92a28dca | size: 1,191 | ext: py | lang: Python
max_*_repo_path: stock-trading-news-alert/stock_price_updates.py | max_*_repo_name: amgad01/RESful-api-and-flask | max_*_repo_head_hexsha: 6061deaab86bd658d5a833262dfcbe431027b07d | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: 1 (min 2021-03-05T18:13:02.000Z, max 2021-03-05T18:13:02.000Z) | max_issues_count: null | max_forks_count: 1 (min 2021-07-25T01:55:12.000Z, max 2021-07-25T01:55:12.000Z)
content:
import os
import requests
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
def get_stock_latest_info(stock: str):
stock_params = {
"function": "TIME_SERIES_DAILY",
"symbol": stock,
"apikey": os.environ.get("STOCK_API_KEY")
}
response = requests.get(STOCK_ENDPOINT, params=stock_params)
# print(response.json())
data = response.json()["Time Series (Daily)"]
data_list = [value for (key, value) in data.items()]
# getting yesterday's data:
yesterday_data = data_list[0]
yesterday_closing_price = yesterday_data["4. close"]
# getting the day before yesterday's data:
day_before_yesterday_data = data_list[1]
day_before_yesterday_closing_price = day_before_yesterday_data["4. close"]
difference = float(yesterday_closing_price) - float(day_before_yesterday_closing_price)
direction = ""
if difference > 0.0:
direction = '🔼'
elif difference < 0.0:
direction = "🔻"
abs_difference = abs(difference)
latest_closing_price_difference_percentage = round((abs_difference / float(yesterday_closing_price) * 100))
return f"{latest_closing_price_difference_percentage} {direction}"
avg_line_length: 36.090909 | max_line_length: 111 | alphanum_fraction: 0.706129

hexsha: aa4f9ded14047816bc473713b0178f2a33423fb5 | size: 710 | ext: py | lang: Python
max_*_repo_path: jp.atcoder/abc037/abc037_d/15386003.py | max_*_repo_name: kagemeka/atcoder-submissions | max_*_repo_head_hexsha: 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: 1 (min 2022-02-09T03:06:25.000Z, max 2022-02-09T03:06:25.000Z) | max_issues_count: 1 (min 2022-02-05T22:53:18.000Z, max 2022-02-09T01:29:30.000Z) | max_forks_count: null
content:
import sys
MOD = 10**9 + 7
def D():
h, w = map(int, sys.stdin.readline().split())
a = [[int(x) for x in sys.stdin.readline().split()] for _ in range(h)]
dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]
b = sorted((a[i][j], i, j) for i in range(h) for j in range(w))
res = [[1] * w for _ in range(h)]
for val, i, j in b:
for dy, dx in dyx:
y = i + dy
x = j + dx
if 0 <= y < h and 0 <= x < w and a[y][x] > val:
res[y][x] += res[i][j]
res[y][x] %= MOD
tot = 0
for i in range(h):
tot += sum(res[i]) % MOD
tot %= MOD
print(tot)
if __name__ == "__main__":
D()
avg_line_length: 25.357143 | max_line_length: 75 | alphanum_fraction: 0.405634

hexsha: 486cc88f1867fc7906b1e9c7686ed746925351c8 | size: 4,365 | ext: py | lang: Python
max_stars_repo_path: sdks/python/apache_beam/examples/cookbook/bigquery_side_input.py | max_stars_repo_name: ravwojdyla/beam | max_stars_repo_head_hexsha: fbcde4cdc7d68de8734bf540c079b2747631a854 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 (min 2017-02-22T03:35:11.000Z, max 2017-04-05T09:38:16.000Z)
max_issues_repo_path: sdks/python/apache_beam/examples/cookbook/bigquery_side_input.py | max_issues_repo_name: kavyasmj/beam0.6 | max_issues_repo_head_hexsha: d59dfeb339bd56feb7569531e5c421a297b0d3dc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 (min 2017-04-24T20:32:25.000Z, max 2022-03-29T12:59:55.000Z)
max_forks_repo_path: sdks/python/apache_beam/examples/cookbook/bigquery_side_input.py | max_forks_repo_name: kavyasmj/beam0.6 | max_forks_repo_head_hexsha: d59dfeb339bd56feb7569531e5c421a297b0d3dc | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 (min 2019-03-04T02:12:46.000Z, max 2021-08-10T20:29:37.000Z)
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Dataflow job that uses BigQuery sources as a side inputs.
Illustrates how to insert side-inputs into transforms in three different forms,
as a singleton, as a iterator, and as a list.
This workflow generate a set of tuples of the form (groupId, corpus, word) where
groupId is a generated identifier for the group and corpus and word are randomly
selected from corresponding rows in BQ dataset 'publicdata:samples.shakespeare'.
Users should specify the number of groups to form and optionally a corpus and/or
a word that should be ignored when forming groups.
"""
import argparse
import logging
from random import randrange
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.pvalue import AsList
from apache_beam.pvalue import AsSingleton
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
def create_groups(group_ids, corpus, word, ignore_corpus, ignore_word):
"""Generate groups given the input PCollections."""
def attach_corpus_fn(group, corpus, ignore):
selected = None
len_corpus = len(corpus)
while not selected:
c = corpus[randrange(0, len_corpus - 1)].values()[0]
if c != ignore:
selected = c
yield (group, selected)
def attach_word_fn(group, words, ignore):
selected = None
len_words = len(words)
while not selected:
c = words[randrange(0, len_words - 1)].values()[0]
if c != ignore:
selected = c
yield group + (selected,)
return (group_ids
| 'attach corpus' >> beam.FlatMap(
attach_corpus_fn,
AsList(corpus),
AsSingleton(ignore_corpus))
| 'attach word' >> beam.FlatMap(
attach_word_fn,
AsList(word),
AsSingleton(ignore_word)))
def run(argv=None):
"""Run the workflow."""
parser = argparse.ArgumentParser()
parser.add_argument('--output')
parser.add_argument('--ignore_corpus', default='')
parser.add_argument('--ignore_word', default='')
parser.add_argument('--num_groups')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
p = beam.Pipeline(options=pipeline_options)
group_ids = []
for i in xrange(0, int(known_args.num_groups)):
group_ids.append('id' + str(i))
query_corpus = 'select UNIQUE(corpus) from publicdata:samples.shakespeare'
query_word = 'select UNIQUE(word) from publicdata:samples.shakespeare'
ignore_corpus = known_args.ignore_corpus
ignore_word = known_args.ignore_word
pcoll_corpus = p | 'read corpus' >> beam.io.Read(
beam.io.BigQuerySource(query=query_corpus))
pcoll_word = p | 'read_words' >> beam.Read(
beam.io.BigQuerySource(query=query_word))
pcoll_ignore_corpus = p | 'create_ignore_corpus' >> beam.Create(
[ignore_corpus])
pcoll_ignore_word = p | 'create_ignore_word' >> beam.Create([ignore_word])
pcoll_group_ids = p | 'create groups' >> beam.Create(group_ids)
pcoll_groups = create_groups(pcoll_group_ids, pcoll_corpus, pcoll_word,
pcoll_ignore_corpus, pcoll_ignore_word)
# pylint:disable=expression-not-assigned
pcoll_groups | WriteToText(known_args.output)
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
avg_line_length: 35.778689 | max_line_length: 80 | alphanum_fraction: 0.727377

hexsha: ee7ab6aef4ed5ef64d9c66873ee3f9e149a9004f | size: 3,277 | ext: py | lang: Python
max_*_repo_path: code/omnissiah/const.py | max_*_repo_name: DeusMechanicus/Omnissiah | max_*_repo_head_hexsha: 8131bf159e4fddf198e8768867c45f79f933b566 | max_*_repo_licenses: ["MIT"] (identical across stars/issues/forks)
max_stars_count: 3 (min 2022-01-27T04:07:22.000Z, max 2022-03-16T00:40:52.000Z) | max_issues_count: null | max_forks_count: null
content:
arp_oid = '.1.3.6.1.2.1.4.22.1.2'
snmp_community_infoid = 1
enplug_api_url = 'https://monitoring.enplug.com/v1/edumonitoring/edustatuses/filter'
enplug_control_url = 'https://core.enplug.com/v1/commandreceiver/execute?eduid='
enplug_post_headers = {'Content-Type':'application/json','Authorization':'Bearer {0}'}
enplug_post_payload = {'NetworkId':None}
activaire_api_url = 'https://api.activaire.com/devices'
activaire_api_headers = {'authorizationToken':''}
mist_login_url = 'https://{0}/api/v1/login'
mist_logout_url = 'https://{0}/api/v1/logout'
mist_timeout_connection = 5
mist_timeout_getpost = 30
mist_sessionid_cookie = 'sessionid'
mist_csrftoken_cookie = 'csrftoken'
mist_login_headers = {'Content-Type':'application/json;charset=UTF-8','Accept':'application/json'}
mist_login_body = '{{"email":"{0}","password":"{1}"}}'
mist_cookie_headers = 'sessionid={0}; csrftoken={1}'
mist_self_url = 'https://{0}/api/v1/self'
mist_inventory_url = 'https://{0}/api/v1/orgs/{1}/inventory'
mist_sites_url = 'https://{0}/api/v1/orgs/{1}/sites'
mist_clients_url = 'https://{0}/api/v1/sites/{1}/stats/clients'
mist_host = 'api.mist.com'
mist_devices_url = 'https://{0}/api/v1/sites/{1}/stats/devices'
ruckussz_login_url = 'https://{0}:8443/wsg/api/public/v6_1/session'
ruckussz_wap_url = 'https://{0}:8443/wsg/api/public/v6_1/aps?listSize={1}'
ruckussz_client_url = 'https://{0}:8443/wsg/api/public/v6_1/aps/{1}/operational/client?listSize=1000'
ruckussz_wap_oper_url = 'https://{0}:8443/wsg/api/public/v6_1/aps/{1}/operational/summary'
ruckussz_timeout_connection = 10
ruckussz_timeout_getpost = 90
ruckussz_login_headers = {'Content-Type':'application/json','Accept':'application/json'}
ruckussz_login_body = '{{"username":"{0}","password":"{1}"}}'
ruckussz_sessionid_cookie = 'JSESSIONID'
min_nnml_word_length = 2
max_nnml_word_length = 256
nnml_preprocess_regex = ['[0-9,A-F,a-f][0-9,A-F,a-f]:[0-9,A-F,a-f][0-9,A-F,a-f]:[0-9,A-F,a-f][0-9,A-F,a-f]:[0-9,A-F,a-f][0-9,A-F,a-f]:[0-9,A-F,a-f][0-9,A-F,a-f]:[0-9,A-F,a-f][0-9,A-F,a-f]',
'[0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f]',
'[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+:[0-9,A-F,a-f]+',
    r'\d+d\d+h\d+m\d+s', r'\d+h\d+m\d+s', r'\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d', r'\d\d\d\d-\d\d-\d\dT', r'\d\d\d\d-\d\d-\d\d', r'\d+\.\d+\.\d+\.\d+', r'\d\d:\d\d:\d\d', r'\d\d:\d\d',
    r'\d\d-\d\d-\d\d', r'node_session=[^\;]+;,',
'([0-9,A-F,a-f]+:+){2,7}[0-9,A-F,a-f]+', '([0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f] ){2,9}[0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f][0-9,A-F,a-f]',
    r'\d{10,11}z']
nnml_manufacturers_dropout = 0.05
nnml_manufacturers_trains = [{'epochs':128, 'batches':128, 'lr':0.0005}, {'epochs':8, 'batches':0, 'lr':0.0005},
{'epochs':32, 'batches':128, 'lr':0.0002}, {'epochs':8, 'batches':0, 'lr':0.0002}]
nnml_devicetypes_dropout = 0.05
nnml_devicetypes_trains = [{'epochs':64, 'batches':256, 'lr':0.0005}, {'epochs':8, 'batches':0, 'lr':0.0005},
{'epochs':32, 'batches':128, 'lr':0.0002}, {'epochs':8, 'batches':0, 'lr':0.0002}]
zbx_zabbix_timeout = 600
zbx_update_zbx_omni_map_sql = 'UPDATE zbx_omni_map SET omniid={0}, zbxid={1} WHERE mapid={2};'
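# Illustrative sketch only (hypothetical helper, not part of this module): the
# Mist constants above appear designed to be combined along these lines, with
# `requests` assumed to be available and error handling omitted.
def _mist_login_sketch(host, email, password):
    import requests
    url = mist_login_url.format(host)
    body = mist_login_body.format(email, password)
    r = requests.post(url, data=body, headers=mist_login_headers,
                      timeout=(mist_timeout_connection, mist_timeout_getpost))
    session_id = r.cookies.get(mist_sessionid_cookie)
    csrf_token = r.cookies.get(mist_csrftoken_cookie)
    # follow-up requests would send {'Cookie': mist_cookie_headers.format(session_id, csrf_token)}
    return session_id, csrf_token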
| 58.517857 | 189 | 0.647543 |
1ab425fe4f0d6629c6c55fcb3067bd8480825176 | 831 | py | Python | sanalberto/views/shop.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | ["MIT"] | null | null | null | sanalberto/views/shop.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | ["MIT"] | null | null | null | sanalberto/views/shop.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | ["MIT"] | null | null | null |
from django.utils import timezone
from django.views.generic import ListView, TemplateView
from meta.views import MetadataMixin
from .common import EventMixin
class ShopMixin(EventMixin):
'''Shop mixin'''
title = 'Tienda'
check_event_redirect = 'sanalberto:shop_closed'
def check_event(self, event):
return event.shop_enabled
class ShopIndexView(ShopMixin, MetadataMixin, ListView):
'''Shop index view'''
pass
class ShopClosedView(EventMixin, MetadataMixin, TemplateView):
'''Shop closed alert view'''
template_name = 'sanalberto/shop_closed.html'
title = 'Tienda cerrada'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['soon'] = self.get_current_event().selling_start > timezone.now()
return context
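# Minimal wiring sketch (hypothetical urls.py, kept as a comment because it does
# not belong in this views module). It assumes the 'sanalberto' namespace used by
# check_event_redirect above; the route names are illustrative:
#
#     from django.urls import path
#     from .views.shop import ShopClosedView, ShopIndexView
#
#     app_name = 'sanalberto'
#     urlpatterns = [
#         path('shop/', ShopIndexView.as_view(), name='shop_index'),
#         path('shop/closed/', ShopClosedView.as_view(), name='shop_closed'),
#     ]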
| 22.459459 | 81 | 0.712395 |
0f266707f06292fc2204cecf77201891b5b1c6e9 | 1,118 | py | Python | thumbor_padding_filter/padding.py | arcivr/thumbor-padding | f1b7e5deadd665d2486b10b7f2b0977a49e7216b | ["MIT"] | 2 | 2021-09-07T15:03:10.000Z | 2021-09-07T19:17:28.000Z | thumbor_padding_filter/padding.py | arcivr/thumbor-padding | f1b7e5deadd665d2486b10b7f2b0977a49e7216b | ["MIT"] | 1 | 2022-01-24T02:40:53.000Z | 2022-01-24T16:46:35.000Z | thumbor_padding_filter/padding.py | arcivr/thumbor-padding | f1b7e5deadd665d2486b10b7f2b0977a49e7216b | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from thumbor.filters import BaseFilter, filter_method
class Filter(BaseFilter):
"""
Pads the image with optional color.
Usage: /filters:padding(<left>, <top>, <right>, <bottom> [, <color>])
Examples of use:
/filters:padding(10, 10, 20, 20)/
/filters:padding(10, 10, 20, 20, eee)/
"""
@filter_method(
BaseFilter.PositiveNumber,
BaseFilter.PositiveNumber,
BaseFilter.PositiveNumber,
BaseFilter.PositiveNumber,
BaseFilter.String
)
def padding(self, left, top, right, bottom, color='fff'):
offset_x = left
offset_y = top
new_width = self.engine.size[0] + left + right
new_height = self.engine.size[1] + top + bottom
new_engine = self.context.modules.engine.__class__(self.context)
new_engine.image = new_engine.gen_image((new_width, new_height), "#" + color)
new_engine.enable_alpha()
new_engine.paste(self.engine, (offset_x, offset_y))
self.engine.image = new_engine.image
| 30.216216 | 85 | 0.623435 |
4a2e7f4b4e6efdcf39748b6389e44b8de8d538fb | 1,412 | py | Python | python-basic/class/property_basic.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | ["MIT"] | null | null | null | python-basic/class/property_basic.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | ["MIT"] | null | null | null | python-basic/class/property_basic.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | ["MIT"] | null | null | null |
"""クラス基礎
クラスのプロパティ(property)の使い方
[説明ページ]
https://tech.nkhn37.net/python-class-property/
"""
class Person(object):
def __init__(self, first_name=None, last_name=None, age=None):
self.__first_name = first_name
self.__last_name = last_name
self.__age = age
@property
def first_name(self):
return self.__first_name
@property
def last_name(self):
return self.__last_name
@property
def age(self):
return self.__age
@first_name.setter
def first_name(self, value):
self.__first_name = value
@last_name.setter
def last_name(self, value):
self.__last_name = value
@age.setter
def age(self, value):
if value >= 0:
self.__age = value
else:
raise ValueError('Only values greater than or equal to 0 '
'are allowed.')
def myname(self):
print(f'私の名前は、{self.__last_name}{self.__first_name}、'
f'{self.__age}歳です。')
def main():
person1 = Person('太郎', '田中', 20)
print(person1.first_name)
print(person1.last_name)
print(person1.age)
person1.myname()
print('===')
person1.first_name = '次郎'
person1.last_name = '佐藤'
person1.age = 15
print(person1.first_name)
print(person1.last_name)
print(person1.age)
person1.myname()
if __name__ == '__main__':
main()
| 21.074627 | 70 | 0.604816 |
e27f2a8e58d854de1ddad7c6611be1aea6166e80 | 4,224 | py | Python | complaints/models.py | Apfirebolt/digifix-electronics-repair-service-portal- | 087f656f850877b38434646559cfbeec4a33837f | ["MIT"] | null | null | null | complaints/models.py | Apfirebolt/digifix-electronics-repair-service-portal- | 087f656f850877b38434646559cfbeec4a33837f | ["MIT"] | 6 | 2021-04-08T21:39:19.000Z | 2022-03-12T00:44:03.000Z | complaints/models.py | Apfirebolt/digifix-electronics-repair-service-portal- | 087f656f850877b38434646559cfbeec4a33837f | ["MIT"] | null | null | null |
from django.db import models
from digifix_computer_repair.settings import AUTH_USER_MODEL
from django.db.models import Q
STATUS_CHOICES = (
("NOT YET OPENED", "Not Yet Opened"),
("OPENED", "Opened"),
("IN PROGRESS", "In Progress"),
("HAS ISSUES", "Has Issues"),
("FIXED", "Fixed"),
)
DEVICE_TYPE_CHOICES = (
("DESKTOP", "Desktop"),
("LAPTOP", "Laptop"),
("Playstation", "Playstation"),
("Mobile", "Mobile"),
("Headphone", "Headphone"),
("Keyboard", "Keyboard"),
("Others", "Others"),
)
RATING_CHOICES = [(i, i) for i in range(6)]
class Complaint(models.Model):
created_by = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='complaints_created')
assigned_engineer = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE,
limit_choices_to=Q(groups__name='Engineers'), related_name='assignments',
null=True, blank=True)
created_at = models.DateTimeField(auto_now=True)
description = models.TextField()
is_resolved = models.BooleanField(default=False)
has_issues = models.BooleanField(default=False)
    status = models.CharField(max_length=150, choices=STATUS_CHOICES, default="NOT YET OPENED")
status_text = models.TextField('Status Text', blank=True, null=True)
device_type = models.CharField('Device Type', max_length=100, choices=DEVICE_TYPE_CHOICES, null=True, blank=True)
reference_id = models.CharField('Reference ID', max_length=100, null=True, blank=True)
def __str__(self):
return 'Description (%s, %s)' % (self.created_by.username, self.device_type)
class Meta:
verbose_name_plural = "Complaints"
class ComplaintImages(models.Model):
related_complaint = models.ForeignKey(Complaint, on_delete=models.CASCADE, related_name='all_images')
gadget_image = models.ImageField(upload_to='complaint_images')
image_description = models.TextField()
def __str__(self):
return str(self.related_complaint.id) + ' - ' + str(self.image_description)
class Meta:
verbose_name_plural = "Complaint Images"
class ReportIssue(models.Model):
related_complaint = models.ForeignKey(Complaint, on_delete=models.CASCADE, related_name='all_issues')
description = models.TextField()
reported_at = models.DateTimeField(auto_now=True)
written_by = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='all_user_issues', null=True,
blank=True)
def __str__(self):
return str(self.related_complaint.id) + ' - ' + str(self.description)
class Meta:
verbose_name_plural = "Complaint Issues"
class Comments(models.Model):
related_complaint = models.ForeignKey(Complaint, on_delete=models.CASCADE, related_name='all_thread_comments')
description = models.TextField()
posted_at = models.DateTimeField(auto_now=True)
written_by = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='all_comments', null=True,
blank=True)
def __str__(self):
return str(self.related_complaint.id) + ' - ' + str(self.description)
class Meta:
verbose_name_plural = "Complaint Comments"
class UserAddress(models.Model):
address = models.CharField(max_length=200)
owner = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='all_addresses')
is_primary = models.BooleanField(default=False)
def __str__(self):
return str(self.address) + ' - ' + str(self.owner.username)
class Meta:
verbose_name_plural = "User Addresses"
class UserTestimonials(models.Model):
written_by = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='user_testimonial')
content = models.TextField("Testimonial Content")
posted_at = models.DateTimeField(auto_now=True)
service_rating = models.IntegerField("Service Ratings", choices=RATING_CHOICES, null=True, blank=True)
def __str__(self):
return str(self.written_by.username) + ' - ' + str(self.content)
class Meta:
verbose_name_plural = "User Testimonials"
| 38.4 | 120 | 0.696733 |
1322d5923b4191509c5990661e3081a5e9e73315 | 2,730 | py | Python | qtoolkit/maths/matrix/su2/similarity_matrix.py | nelimee/qtoolkit | 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | ["CECILL-B"] | 3 | 2018-12-30T04:50:44.000Z | 2019-12-25T12:26:02.000Z | qtoolkit/maths/matrix/su2/similarity_matrix.py | nelimee/qtoolkit | 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | ["CECILL-B"] | null | null | null | qtoolkit/maths/matrix/su2/similarity_matrix.py | nelimee/qtoolkit | 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | ["CECILL-B"] | 1 | 2021-08-08T15:59:46.000Z | 2021-08-08T15:59:46.000Z |
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau ([email protected])
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
import numpy
import qtoolkit.maths.matrix.su2.transformations as su2trans
import qtoolkit.utils.types as qtypes
def similarity_matrix(A: qtypes.SU2Matrix, B: qtypes.SU2Matrix) -> qtypes.SU2Matrix:
"""Find :math:`S \\in SU(2) \\mid A = S B S^\\dagger`.
:param A: First :math:`SU(2)` matrix.
:param B: Second :math:`SU(2)` matrix.
:return: the :math:`SU(2)` matrix :math:`S`.
"""
a, b = su2trans.su2_to_so3(A), su2trans.su2_to_so3(B)
norm_a, norm_b = numpy.linalg.norm(a, 2), numpy.linalg.norm(b, 2)
s = numpy.cross(b, a)
norm_s = numpy.linalg.norm(s, 2)
if norm_s == 0:
# The representative vectors are too close to each other, this
# means that the original matrices are also very close, and so
# returning the identity matrix is fine.
return numpy.identity(2)
angle_between_a_and_b = numpy.arccos(numpy.inner(a, b) / (norm_a * norm_b))
s *= angle_between_a_and_b / norm_s
S = su2trans.so3_to_su2(s)
return S
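# Geometric reading of the construction above (added sketch, same notation as the
# docstring): with a = su2_to_so3(A) and b = su2_to_so3(B), the rotation vector is
#
#     s = theta * (b x a) / |b x a|,   theta = arccos(<a, b> / (|a| |b|)),
#
# i.e. a rotation about the axis b x a by the angle between a and b, which carries
# the direction of b onto the direction of a. Conjugating by S = so3_to_su2(s)
# therefore rotates B's axis onto A's; when A and B share the same rotation angle
# this gives A = S B S^dagger (a hypothetical check: numpy.allclose(A, S @ B @ S.conj().T)).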
| 44.032258 | 84 | 0.679121 |
c229ea7422c8dd8e50880574edc6d5fd82b8e4ba | 19,536 | py | Python | pvlib/tests/test_tracking.py | kyeling/pvlib-python | e3a3be970c44d227b6e49ea536e76be75689c7ab | ["BSD-3-Clause"] | null | null | null | pvlib/tests/test_tracking.py | kyeling/pvlib-python | e3a3be970c44d227b6e49ea536e76be75689c7ab | ["BSD-3-Clause"] | null | null | null | pvlib/tests/test_tracking.py | kyeling/pvlib-python | e3a3be970c44d227b6e49ea536e76be75689c7ab | ["BSD-3-Clause"] | null | null | null |
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from pvlib.location import Location
from pvlib import tracking
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
index = pd.date_range(start='20180701T1200', freq='1s', periods=1)
apparent_zenith = pd.Series([10], index=index)
apparent_azimuth = pd.Series([180], index=index)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=index, dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_scalars():
apparent_zenith = 10
apparent_azimuth = 180
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
def test_arrays():
apparent_zenith = np.array([10])
apparent_azimuth = np.array([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
def test_nans():
apparent_zenith = np.array([10, np.nan, 10])
apparent_azimuth = np.array([180, 180, np.nan])
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = {'tracker_theta': np.array([0, nan, nan]),
'aoi': np.array([10, nan, nan]),
'surface_azimuth': np.array([90, nan, nan]),
'surface_tilt': np.array([0, nan, nan])}
for k, v in expect.items():
assert_allclose(tracker_data[k], v)
# repeat with Series because nans can differ
apparent_zenith = pd.Series(apparent_zenith)
apparent_azimuth = pd.Series(apparent_azimuth)
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame(np.array(
[[ 0., 10., 90., 0.],
[nan, nan, nan, nan],
[nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(tracker_data, expect)
def test_arrays_multi():
apparent_zenith = np.array([[10, 10], [10, 10]])
apparent_azimuth = np.array([[180, 180], [180, 180]])
# singleaxis should fail for num dim > 1
with pytest.raises(ValueError):
tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
# uncomment if we ever get singleaxis to support num dim > 1 arrays
# assert isinstance(tracker_data, dict)
# expect = {'tracker_theta': np.full_like(apparent_zenith, 0),
# 'aoi': np.full_like(apparent_zenith, 10),
# 'surface_azimuth': np.full_like(apparent_zenith, 90),
# 'surface_tilt': np.full_like(apparent_zenith, 0)}
# for k, v in expect.items():
# assert_allclose(tracker_data[k], v)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_horizon_flat():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.array([100, 45, 100])
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
# depending on platform and numpy versions this will generate
# RuntimeWarning: invalid value encountered in > < >=
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=0,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[ nan, nan, nan, nan],
[ 0., 45., 270., 0.],
[ nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_horizon_tilted():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.full_like(solar_azimuth, 45)
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=90,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[ 180., 45., 0., 90.],
[ 0., 45., 180., 90.],
[ 179., 45., 359., 90.]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_low_sun_angles():
# GH 656
result = tracking.singleaxis(
apparent_zenith=80, apparent_azimuth=338, axis_tilt=30,
axis_azimuth=180, max_angle=60, backtrack=True, gcr=0.35)
expected = {
'tracker_theta': np.array([-50.31051385]),
'aoi': np.array([61.35300178]),
'surface_azimuth': np.array([112.53615425]),
'surface_tilt': np.array([56.42233095])}
for k, v in result.items():
assert_allclose(expected[k], v)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
# results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(
max_angle=60., axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth, backtrack=False)
    # the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852, 'surface_azimuth': 180-18.432,
'surface_tilt': 24.92122,
'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_LocalizedSingleAxisTracker_creation():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize_location():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
location = Location(latitude=32, longitude=-111)
localized_system = system.localize(location=location)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
# see test_irradiance for more thorough testing
def test_get_aoi():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
surface_tilt = np.array([30, 0])
surface_azimuth = np.array([90, 270])
solar_zenith = np.array([70, 10])
solar_azimuth = np.array([100, 180])
out = system.get_aoi(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
expected = np.array([40.632115, 10.])
assert_allclose(out, expected, atol=0.000001)
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
# latitude=32, longitude=-111
solar_position = pd.DataFrame(np.array(
[[55.36421554, 55.38851771, 34.63578446, 34.61148229,
172.32003763, -3.44516534],
[96.50000401, 96.50000401, -6.50000401, -6.50000401,
246.91581654, -3.56292888]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
# invalid warnings already generated in horizon test above,
# no need to clutter test output here
with np.errstate(invalid='ignore'):
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
# some invalid values in irradiance.py. not our problem here
with np.errstate(invalid='ignore'):
irradiance = system.get_irradiance(tracker_data['surface_tilt'],
tracker_data['surface_azimuth'],
solar_zenith,
solar_azimuth,
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
expected = pd.DataFrame(data=np.array(
[[961.80070, 815.94490, 145.85580, 135.32820, 10.52757492],
[nan, nan, nan, nan, nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_SingleAxisTracker___repr__():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
expected = ('SingleAxisTracker: \n axis_tilt: 0\n axis_azimuth: 0\n '
'max_angle: 45\n backtrack: True\n gcr: 0.25\n '
'name: None\n surface_tilt: None\n surface_azimuth: None\n '
'module: blah\n inverter: blarg\n albedo: 0.25\n '
'racking_model: open_rack')
assert system.__repr__() == expected
def test_LocalizedSingleAxisTracker___repr__():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg',
gcr=0.25)
expected = ('LocalizedSingleAxisTracker: \n axis_tilt: 0\n '
'axis_azimuth: 0\n max_angle: 90\n backtrack: True\n '
'gcr: 0.25\n name: None\n surface_tilt: None\n '
'surface_azimuth: None\n module: blah\n inverter: blarg\n '
'albedo: 0.25\n racking_model: open_rack\n '
'latitude: 32\n longitude: -111\n altitude: 0\n tz: UTC')
assert localized_system.__repr__() == expected
| 41.922747 | 79 | 0.558251 |
783f205fc276b4c2b2be68399622c3add941db1c | 2,297 | py | Python | hedp/rh.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | ["CECILL-B"] | 9 | 2015-04-07T12:45:40.000Z | 2020-10-26T14:40:49.000Z | hedp/rh.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | ["CECILL-B"] | 9 | 2015-10-20T13:01:09.000Z | 2016-09-09T15:24:36.000Z | hedp/rh.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | ["CECILL-B"] | 12 | 2015-12-17T14:24:29.000Z | 2021-04-26T13:42:48.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
from .cst import eV2K
import numpy as np
from scipy.constants import m_p, k as k_b
from scipy.optimize import brentq
def rh_shock_temperature(rho_2, u_s, Z, Abar,
gamma=None, eos_tab=None, T_min=0.01, T_max=1000):
"""
Calculate the post-shock temperature from the shock velocity with Rankine-Hugoniot equations. Uses Thomas-Fermi formula for the plasma ionization.
Source: "High-Energy-Density Physics Fundamentals, Inertial Fusion, and Experimental Astrophysics"
R.P.Drake 2006, eqn. (4.20), page 116.
Parameters
----------
- rho_2 : (float) post-shock density [g/cm^3]
- u_s : (float) shock velocity (in the shock frame) [km/s]
- Z : (float) atomic number of the element (can be non integer for mixtures)
    - Abar : (float) mean atomic mass
- T_min, T_max: (floats) optional bounds for the temperature in eV
- gamma : [optional] (float) adiabatic index
- eos_tab: [optional] (eospac.TableBase) the EoS table object used to calculate gamma if provided
Returns
-------
- temp: (float) temperature in eV
"""
RHO_MIN = 1e-10
TEMP_MIN = 1e-4 # floors on the possible temperature and density
    if (gamma is None) == (eos_tab is None):  # XNOR: true when both or neither are given
        raise ValueError('Exactly one of the eos_tab or gamma arguments should be provided')
def solve_h(T_eV, rho_2, u_s, Z, Abar, gamma=gamma, eos_tab=eos_tab):
from hedp.eos.ionization import thomas_fermi_ionization
T_eV = max(T_eV, TEMP_MIN)
rho_2 = max(rho_2, RHO_MIN)
Zbar = thomas_fermi_ionization(np.array([rho_2]), np.array([T_eV]), Z, Abar)
if eos_tab is not None:
gamma = 1 + eos_tab.q['Gamma', 't'](np.array([rho_2]), np.array([T_eV*eV2K]))
err = T_eV \
- Abar*m_p/(1 + Zbar)*(u_s*1e3)**2*\
2*(gamma - 1)/((gamma + 1)**2*(k_b*eV2K))
return err
res = brentq(solve_h, T_min, T_max,
args=(rho_2, u_s, Z, Abar, gamma, eos_tab))
return res
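# For reference (added sketch, matching solve_h above): brentq finds the
# self-consistent temperature of Drake's eq. (4.20),
#
#     k_B T = (2 * (gamma - 1) / (gamma + 1)**2) * Abar * m_p * u_s**2 / (1 + Zbar(rho_2, T)),
#
# where Zbar (and, when eos_tab is given, gamma) is itself evaluated at T, hence
# the root find rather than a closed-form expression. A hypothetical call with
# illustrative aluminium-like numbers, not taken from any publication:
#
#     T_eV = rh_shock_temperature(rho_2=2.7, u_s=50.0, Z=13.0, Abar=27.0, gamma=5.0/3.0)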
| 37.655738 | 150 | 0.629081 |
baac2bf5e4e99c938c08d4295e2fad8a914fcc33 | 6,390 | py | Python | lib/charms/traefik_route_k8s/v0/traefik_route.py | canonical/traefik-k8s-operator | f493a7b5731e08490ba1909fd1032d4e1c074b11 | ["Apache-2.0"] | 1 | 2022-02-19T00:59:25.000Z | 2022-02-19T00:59:25.000Z | lib/charms/traefik_route_k8s/v0/traefik_route.py | canonical/traefik-k8s-operator | f493a7b5731e08490ba1909fd1032d4e1c074b11 | ["Apache-2.0"] | 27 | 2022-01-20T16:21:41.000Z | 2022-03-30T13:43:43.000Z | lib/charms/traefik_route_k8s/v0/traefik_route.py | canonical/traefik-k8s-operator | f493a7b5731e08490ba1909fd1032d4e1c074b11 | ["Apache-2.0"] | 4 | 2022-01-25T22:22:37.000Z | 2022-03-14T09:04:52.000Z |
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
r"""# Interface Library for traefik_route.
This library wraps relation endpoints for traefik_route. The requirer of this
relation is the traefik-route-k8s charm, or any charm capable of providing
Traefik configuration files. The provider is the traefik-k8s charm, or another
charm willing to consume Traefik configuration files.
## Getting Started
To get started using the library, you just need to fetch the library using `charmcraft`.
```shell
cd some-charm
charmcraft fetch-lib charms.traefik_route_k8s.v0.traefik_route
```
To use the library from the provider side (Traefik):
```yaml
requires:
traefik_route:
interface: traefik_route
limit: 1
```
```python
from charms.traefik_route_k8s.v0.traefik_route import TraefikRouteProvider
class TraefikCharm(CharmBase):
def __init__(self, *args):
# ...
self.traefik_route = TraefikRouteProvider(self)
self.framework.observe(
self.traefik_route.on.ready, self._handle_traefik_route_ready
)
def _handle_traefik_route_ready(self, event):
config: str = self.traefik_route.get_config(event.relation) # yaml
# use config to configure Traefik
```
To use the library from the requirer side (TraefikRoute):
```yaml
requires:
traefik-route:
interface: traefik_route
limit: 1
optional: false
```
```python
# ...
from charms.traefik_route_k8s.v0.traefik_route import TraefikRouteRequirer
class TraefikRouteCharm(CharmBase):
def __init__(self, *args):
# ...
traefik_route = TraefikRouteRequirer(
self, self.model.relations.get("traefik-route"),
"traefik-route"
)
if traefik_route.is_ready():
traefik_route.submit_to_traefik(
config={'my': {'traefik': 'configuration'}}
)
```
"""
import logging
from typing import Optional
import yaml
from ops.charm import CharmBase, RelationEvent, CharmEvents
from ops.framework import EventSource, Object
from ops.model import Relation
# The unique Charmhub library identifier, never change it
LIBID = "fe2ac43a373949f2bf61383b9f35c83c"
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 1
log = logging.getLogger(__name__)
class TraefikRouteException(RuntimeError):
"""Base class for exceptions raised by TraefikRoute."""
class UnauthorizedError(TraefikRouteException):
"""Raised when the unit needs leadership to perform some action."""
class TraefikRouteProviderReadyEvent(RelationEvent):
"""Event emitted when Traefik is ready to provide ingress for a routed unit."""
class TraefikRouteRequirerReadyEvent(RelationEvent):
"""Event emitted when a unit requesting ingress has provided all data Traefik needs."""
class TraefikRouteRequirerEvents(CharmEvents):
"""Container for TraefikRouteRequirer events."""
ready = EventSource(TraefikRouteRequirerReadyEvent)
class TraefikRouteProviderEvents(CharmEvents):
"""Container for TraefikRouteProvider events."""
ready = EventSource(TraefikRouteProviderReadyEvent)
class TraefikRouteProvider(Object):
"""Implementation of the provider of traefik_route.
This will presumably be owned by a Traefik charm.
The main idea is that Traefik will observe the `ready` event and, upon
receiving it, will fetch the config from the TraefikRoute's application databag,
apply it, and update its own app databag to let Route know that the ingress
is there.
The TraefikRouteProvider provides api to do this easily.
"""
on = TraefikRouteProviderEvents()
def __init__(self, charm: CharmBase, relation_name: str = "traefik-route"):
"""Constructor for TraefikRouteProvider.
Args:
charm: The charm that is instantiating the instance.
            relation_name: The name of the relation to bind to
(defaults to "traefik-route").
"""
super().__init__(charm, relation_name)
self.charm = charm
self.framework.observe(
self.charm.on[relation_name].relation_changed, self._on_relation_changed
)
def _on_relation_changed(self, event: RelationEvent):
if self.is_ready(event.relation):
# todo check data is valid here?
self.on.ready.emit(event.relation)
@staticmethod
def is_ready(relation: Relation) -> bool:
"""Whether TraefikRoute is ready on this relation: i.e. the remote app shared the config."""
return "config" in relation.data[relation.app]
@staticmethod
def get_config(relation: Relation) -> Optional[str]:
"""Retrieve the config published by the remote application."""
# todo validate this config
return relation.data[relation.app].get("config")
class TraefikRouteRequirer(Object):
"""Wrapper for the requirer side of traefik-route.
The traefik_route requirer will publish to the application databag an object like:
{
'config': <Traefik_config>
}
NB: TraefikRouteRequirer does no validation; it assumes that the
traefik-route-k8s charm will provide valid yaml-encoded config.
The TraefikRouteRequirer provides api to store this config in the
application databag.
"""
on = TraefikRouteRequirerEvents()
def __init__(self, charm: CharmBase, relation: Relation, relation_name: str = "traefik-route"):
super(TraefikRouteRequirer, self).__init__(charm, relation_name)
self._charm = charm
self._relation = relation
def is_ready(self) -> bool:
"""Is the TraefikRouteRequirer ready to submit data to Traefik?"""
return self._relation is not None
def submit_to_traefik(self, config):
"""Relay an ingress configuration data structure to traefik.
This will publish to TraefikRoute's traefik-route relation databag
the config traefik needs to route the units behind this charm.
"""
if not self._charm.unit.is_leader():
raise UnauthorizedError()
app_databag = self._relation.data[self._charm.app]
# Traefik thrives on yaml, feels pointless to talk json to Route
app_databag["config"] = yaml.safe_dump(config)
| 31.170732 | 100 | 0.716432 |
6ae606afc60ecc5838f9cc0241a5764b14c9f40b | 29,280 | py | Python | tensorflow_probability/python/mcmc/replica_exchange_mc_test.py | mrksr/probability | 242731a9b2b42d4eb676539658a8d5e8267c0720 | ["Apache-2.0"] | 1 | 2021-06-16T20:06:04.000Z | 2021-06-16T20:06:04.000Z | tensorflow_probability/python/mcmc/replica_exchange_mc_test.py | mrksr/probability | 242731a9b2b42d4eb676539658a8d5e8267c0720 | ["Apache-2.0"] | null | null | null | tensorflow_probability/python/mcmc/replica_exchange_mc_test.py | mrksr/probability | 242731a9b2b42d4eb676539658a8d5e8267c0720 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ReplicaExchangeMC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
def effective_sample_size(x, **kwargs):
"""tfp.mcmc.effective_sample_size, with a maximum appropriate for HMC."""
# Since ESS is an estimate, it can go wrong... E.g. we can have negatively
# correlated samples, which *do* have ESS > N, but this ESS is only applicable
# for variance reduction power for estimation of the mean. We want to
# (blindly) use ESS everywhere (e.g. variance estimates)....and so...
ess = tfp.mcmc.effective_sample_size(x, **kwargs)
n = tf.cast(prefer_static.size0(x), x.dtype)
return tf.minimum(ess, n)
def _set_seed():
"""Helper which uses graph seed if using TFE."""
# TODO(b/68017812): Deprecate once TFE supports seed.
seed_stream = test_util.test_seed_stream()
if tf.executing_eagerly():
tf.random.set_seed(seed_stream())
return None
return seed_stream()
@test_util.test_graph_and_eager_modes
class DefaultSwapProposedFnTest(test_util.TestCase):
@parameterized.named_parameters(
('prob1p0_n1', 1.0, 1),
('prob1p0_n2', 1.0, 2),
('prob1p0_n4', 1.0, 4),
('prob1p0_n5', 1.0, 5),
('prob0p5_n1', 0.5, 1),
('prob0p5_n4', 0.5, 4),
('prob0p5_n7', 0.5, 7),
('prob0p0_n1', 0.0, 1),
('prob0p0_n2', 0.0, 2),
('prob0p0_n5', 0.0, 5),
)
def testProbSwapNumReplicaNoBatch(self, prob_swap, num_replica):
fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)
num_results = 100
swaps = tf.stack(
[fn(num_replica, seed=i) for i in range(num_results)],
axis=0)
self.assertAllEqual((num_results, num_replica), swaps.shape)
self.check_swaps_with_no_batch_shape(self.evaluate(swaps), prob_swap)
@parameterized.named_parameters(
('prob1p0_n1', 1.0, 1),
('prob1p0_n2', 1.0, 2),
('prob1p0_n5', 1.0, 5),
('prob0p5_n1', 0.5, 1),
('prob0p5_n2', 0.5, 2),
('prob0p5_n3', 0.5, 3),
('prob0p0_n1', 0.0, 1),
('prob0p0_n2', 0.0, 2),
('prob0p0_n5', 0.0, 5),
)
def testProbSwapNumReplicaWithBatch(self, prob_swap, num_replica):
fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)
num_results = 100
swaps = tf.stack(
[fn(num_replica, batch_shape=[2], seed=i) for i in range(num_results)],
axis=0)
self.assertAllEqual((num_results, num_replica, 2), swaps.shape)
swaps_ = self.evaluate(swaps)
# Batch members should have distinct swaps in most cases.
frac_same = np.mean(swaps_[..., 0] == swaps_[..., 1])
# If prob_swap == 0, swap is the null_swap always.
if (prob_swap == 0 or
# If num_replica == 1, swap = [0] always.
num_replica == 1 or
# In this case, we always swap and it's always [1, 0].
(num_replica == 2 and prob_swap == 1)):
self.assertEqual(1.0, frac_same)
else:
self.assertLess(frac_same, 0.9)
# Check that each batch member has proper statistics.
for i in range(swaps_.shape[-1]):
self.check_swaps_with_no_batch_shape(swaps_[..., i], prob_swap)
def check_swaps_with_no_batch_shape(self, swaps_, prob_swap):
assert swaps_.ndim == 2, 'Expected shape [num_results, num_replica]'
num_results, num_replica = swaps_.shape
null_swaps = np.arange(num_replica)
# Check that we propose at least one swap, prob_swap fraction of the
# time.
# An exception is made for when num_replica == 1, since in this case the
# only swap is the null swap.
expected_prob_swap = prob_swap * np.float32(num_replica > 1)
observed_prob_swap = np.mean(np.any(swaps_ != null_swaps, axis=1))
self.assertAllClose(
expected_prob_swap,
observed_prob_swap,
rtol=0,
# Accurate to 4 standard errors.
atol=4 * np.sqrt(prob_swap * (1 - prob_swap) / num_results))
# Verify the swap is "once only."
for n in range(20):
self.assertAllEqual(null_swaps, np.take(swaps_[n], swaps_[n]))
@test_util.test_graph_and_eager_modes
class REMCTest(test_util.TestCase):
def setUp(self):
tf.random.set_seed(123)
super(REMCTest, self).setUp()
def _checkNormalREMCSampling(self,
inverse_temperatures,
num_results=1000,
prob_swap=1.0,
dtype=np.float32):
"""Sampling from standard normal with REMC."""
target = tfd.Normal(dtype(0.), dtype(1.))
inverse_temperatures = dtype(inverse_temperatures)
num_replica = len(inverse_temperatures)
step_size = 0.51234 / np.sqrt(inverse_temperatures)
num_leapfrog_steps = 3
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_size,
store_parameters_in_results=True,
num_leapfrog_steps=num_leapfrog_steps)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
swap_proposal_fn=tfp.mcmc.default_swap_proposal_fn(
prob_swap),
seed=_set_seed())
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=target.sample(seed=_set_seed()),
kernel=remc,
num_burnin_steps=50,
trace_fn=lambda _, results: results,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results,), states.shape)
states_, kr_, replica_ess_ = self.evaluate([
states,
kernel_results,
# Get the first (and only) state part for all replicas.
effective_sample_size(kernel_results.post_swap_replica_states[0]),
])
logging.vlog(
2, '---- execution:{} mean:{} stddev:{}'.format(
'eager' if tf.executing_eagerly() else 'graph',
states_.mean(), states_.std()))
# Some shortened names.
replica_log_accept_ratio = (
kr_.post_swap_replica_results.log_accept_ratio)
replica_states_ = kr_.post_swap_replica_states[0] # Get rid of "parts"
# Target state is at index 0.
self.assertAllClose(states_, replica_states_[:, 0])
# Check that *each* replica has correct marginal.
def _check_sample_stats(replica_idx):
x = replica_states_[:, replica_idx]
ess = replica_ess_[replica_idx]
err_msg = 'replica_idx={}'.format(replica_idx)
mean_atol = 5 * 1.0 / np.sqrt(ess)
self.assertAllClose(x.mean(), 0.0, atol=mean_atol, msg=err_msg)
# For a tempered Normal, Variance = T.
expected_var = 1 / inverse_temperatures[replica_idx]
var_atol = 5 * expected_var * np.sqrt(2) / np.sqrt(ess)
self.assertAllClose(np.var(x), expected_var, atol=var_atol, msg=err_msg)
for replica_idx in range(num_replica):
_check_sample_stats(replica_idx)
# Test log_accept_ratio and replica_log_accept_ratio.
self.assertAllEqual((num_results, num_replica),
replica_log_accept_ratio.shape)
replica_mean_accept_ratio = np.mean(
np.exp(np.minimum(0, replica_log_accept_ratio)), axis=0)
for accept_ratio in replica_mean_accept_ratio:
# Every single replica should have a decent P[Accept]
self.assertBetween(accept_ratio, 0.2, 0.99)
# Check swap probabilities for adjacent swaps.
self.assertAllEqual((num_results, num_replica - 1),
kr_.is_swap_accepted_adjacent.shape)
conditional_swap_prob = (
np.sum(kr_.is_swap_accepted_adjacent, axis=0) /
np.sum(kr_.is_swap_proposed_adjacent, axis=0)
)
if num_replica > 1 and prob_swap > 0:
# If temperatures are reasonable, this should be the case.
# Ideally conditional_swap_prob is near 30%, but we're not tuning here
self.assertGreater(np.min(conditional_swap_prob), 0.01)
self.assertLess(np.max(conditional_swap_prob), 0.99)
# Check swap probabilities for all swaps.
def _check_swap_matrix(matrix):
self.assertAllEqual((num_results, num_replica, num_replica),
matrix.shape)
      # Matrix is stochastic (since you either get swapped with another
# replica, or yourself), and symmetric, since we do once-only swaps.
self.assertAllEqual(np.ones((num_results, num_replica)),
matrix.sum(axis=-1))
self.assertAllEqual(matrix, np.transpose(matrix, (0, 2, 1)))
# By default, all swaps are between adjacent replicas.
for i in range(num_replica):
for j in range(i + 2, num_replica):
self.assertEqual(0.0, np.max(np.abs(matrix[..., i, j])))
_check_swap_matrix(kr_.is_swap_proposed)
_check_swap_matrix(kr_.is_swap_accepted)
# Check inverse_temperatures never change.
self.assertAllEqual(
np.repeat([inverse_temperatures], axis=0, repeats=num_results),
kr_.inverse_temperatures)
# Check that store_parameters_in_results=True worked.
self.assertAllEqual(
np.repeat(
[step_size], axis=0, repeats=num_results),
kr_.post_swap_replica_results.accepted_results.step_size)
self.assertAllEqual(
np.repeat(
[num_leapfrog_steps], axis=0, repeats=num_results),
kr_.post_swap_replica_results.accepted_results.num_leapfrog_steps)
def testNormalOddNumReplicas(self):
"""Sampling from the Standard Normal Distribution."""
self._checkNormalREMCSampling(
inverse_temperatures=[1., 0.8, 0.6],
num_results=500 if tf.executing_eagerly() else 2000,
)
def testNormalEvenNumReplicas(self):
"""Sampling from the Standard Normal Distribution."""
self._checkNormalREMCSampling(
inverse_temperatures=[1., 0.8, 0.7, 0.6],
num_results=500 if tf.executing_eagerly() else 2000,
)
def testNormalHighTemperatureOnly(self):
"""Sampling from a tempered Normal Distribution."""
self._checkNormalREMCSampling(
inverse_temperatures=[0.5],
num_results=500 if tf.executing_eagerly() else 2000,
)
def testNormalLowTemperatureOnly(self):
"""Sampling from a tempered Normal Distribution."""
self._checkNormalREMCSampling(
inverse_temperatures=[2.0],
num_results=500 if tf.executing_eagerly() else 2000,
)
def testRWM2DMixNormal(self):
"""Sampling from a 2-D Mixture Normal Distribution."""
dtype = np.float32
# By symmetry, target has mean [0, 0]
# Therefore, Var = E[X^2] = E[E[X^2 | c]], where c is the component.
# Now..., for the first component,
# E[X1^2] = Var[X1] + Mean[X1]^2
# = 0.3^2 + 1^2,
# and similarly for the second. As a result, Var[mixture] = 1.09.
target = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.5, 0.5]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., -1], [1., 1.]],
scale_identity_multiplier=0.3))
inverse_temperatures = 10.**tf.linspace(start=0., stop=-1., num=4)
# We need to pad the step_size so it broadcasts against MCMC samples. In
# this case we have 1 replica dim, 0 batch dims, and 1 event dim hence need
# to right pad the step_size by one dim (for the event).
step_size = 0.2 / tf.math.sqrt(inverse_temperatures[:, tf.newaxis])
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_size,
num_leapfrog_steps=5)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
# Verified that test fails if inverse_temperatures = [1.]
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed())
def trace_fn(state, results): # pylint: disable=unused-argument
return results.post_swap_replica_results.log_accept_ratio
num_results = 500 if tf.executing_eagerly() else 2000
states, replica_log_accept_ratio = tfp.mcmc.sample_chain(
num_results=num_results,
# Start at one of the modes, in order to make mode jumping necessary
# if we want to pass test.
current_state=tf.ones(2, dtype=dtype),
kernel=remc,
num_burnin_steps=50,
trace_fn=trace_fn,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results, 2), states.shape)
replica_accept_ratio = tf.reduce_mean(
tf.math.exp(tf.minimum(0., replica_log_accept_ratio)),
axis=0)
[
sample_mean_,
sample_variance_,
replica_accept_ratio_,
expected_mean_,
expected_stddev_,
expected_variance_,
ess_,
] = self.evaluate([
tf.reduce_mean(states, axis=0),
tfp.stats.variance(states),
replica_accept_ratio,
target.mean(),
target.stddev(),
target.variance(),
effective_sample_size(states),
])
logging.vlog(
2, '---- execution:{} accept_ratio:{} mean:{}'.format(
'eager' if tf.executing_eagerly() else 'graph',
replica_accept_ratio_, sample_mean_))
self.assertAllClose(
expected_mean_,
sample_mean_,
atol=5 * expected_stddev_ / np.sqrt(np.min(ess_)))
self.assertAllClose(
expected_variance_,
sample_variance_,
atol=5 * expected_variance_ / np.sqrt(np.min(ess_)))
def testMultipleCorrelatedStatesWithNoBatchDims(self):
dtype = np.float32
num_results = 500 if tf.executing_eagerly() else 2000
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
# Use LinearOperatorLowerTriangular to get broadcasting ability.
linop = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(true_cov))
    # It's ok to decorate this since we only need to stress the TransitionKernel.
@tf.function(autograph=False)
def target_log_prob(x, y):
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
xy = tf.stack([x, y], axis=-1) - true_mean
z = linop.solvevec(xy)
return -0.5 * tf.reduce_sum(z**2., axis=-1)
inverse_temperatures = tf.constant([1., 0.75, 0.5])
# We need to pad the step_size so it broadcasts against MCMC samples. In
# this case we have 1 replica dim, 0 batch dims, and 0 event dims (per each
# of 2 state parts) hence no padding is needed.
# We do however supply a step size for each state part.
step_sizes = [0.9 / tf.math.sqrt(inverse_temperatures)]*2
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_sizes,
num_leapfrog_steps=3)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=target_log_prob,
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed())
def trace_fn(state, results): # pylint: disable=unused-argument
return results.post_swap_replica_results.log_accept_ratio
[samples_x,
samples_y], replica_log_accept_ratio = (tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=200,
current_state=[1., 1.],
kernel=remc,
trace_fn=trace_fn,
parallel_iterations=1)) # For determinism.
samples = tf.stack([samples_x, samples_y], axis=-1)
sample_mean = tf.reduce_mean(samples, axis=0)
sample_cov = tfp.stats.covariance(samples, sample_axis=0)
replica_accept_ratio = tf.reduce_mean(
tf.math.exp(tf.minimum(0., replica_log_accept_ratio)),
axis=0)
[
sample_mean_,
sample_cov_,
replica_accept_ratio_,
ess_,
] = self.evaluate([
sample_mean,
sample_cov,
replica_accept_ratio,
effective_sample_size(samples),
])
logging.vlog(
2, '---- execution:{} accept_ratio:{} mean:{} cov:{}'.format(
'eager' if tf.executing_eagerly() else 'graph',
replica_accept_ratio_, sample_mean_, sample_cov_))
self.assertAllEqual([num_results], samples_x.shape)
self.assertAllEqual([num_results], samples_y.shape)
max_scale = np.sqrt(np.max(true_cov))
self.assertAllClose(
true_mean, sample_mean_, atol=5 * max_scale / np.sqrt(np.min(ess_)))
self.assertAllClose(
true_cov, sample_cov_, atol=5 * max_scale**2 / np.sqrt(np.min(ess_)))
def _checkMVNWithOneBatchDim(self, inverse_temperatures, step_size):
"""Sampling from two batch diagonal multivariate normal."""
step_size += np.exp(np.pi) / 100 # Prevent resonances.
# Small scale and well-separated modes mean we need replica swap to
# work or else tests fail.
loc = np.array(
[
# Use 3-D normals, ensuring batch and event sizes don't broadcast.
[-1., -0.5, 0.], # loc of first batch
[1., 0.5, 0.], # loc of second batch
],
dtype=np.float32)
scale_identity_multiplier = [0.5, 0.8]
target = tfd.MultivariateNormalDiag(
loc=loc, scale_identity_multiplier=scale_identity_multiplier)
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_size,
num_leapfrog_steps=3)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(
lambda x: target.copy().log_prob(x), autograph=False),
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed())
def trace_fn(state, results): # pylint: disable=unused-argument
return [
results.post_swap_replica_results.log_accept_ratio,
results.post_swap_replica_states
]
num_results = 500 if tf.executing_eagerly() else 2000
states, (log_accept_ratio, replica_states) = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=loc[::-1], # Batch members far from their mode!
kernel=remc,
num_burnin_steps=100,
trace_fn=trace_fn,
parallel_iterations=1) # For determinism.
num_replica = inverse_temperatures.shape[0]
self.assertLen(replica_states, 1) # One state part
replica_states = replica_states[0]
self.assertAllEqual((num_results, num_replica) + loc.shape,
replica_states.shape)
self.assertAllEqual((num_results,) + loc.shape, states.shape)
(
states_,
replica_states_,
replica_mean_,
replica_cov_,
accept_probs_,
ess_,
) = self.evaluate([
states,
replica_states,
tf.reduce_mean(replica_states, axis=0),
tfp.stats.covariance(replica_states),
tf.math.exp(tf.minimum(0., log_accept_ratio)),
effective_sample_size(replica_states),
])
logging.vlog(
2, '---- execution:{} Min[ESS]: {} mean_accept: {}'.format(
'eager' if tf.executing_eagerly() else 'graph',
np.min(ess_), np.mean(accept_probs_, axis=0)))
self.assertAllEqual(states_, replica_states_[:, 0])
def _check_stats(replica_idx, batch_idx):
err_msg = 'Failure in replica {}, batch {}'.format(replica_idx, batch_idx)
assert inverse_temperatures.ndim in [1, 2]
if inverse_temperatures.ndim == 1:
temperature = 1 / inverse_temperatures[replica_idx]
elif inverse_temperatures.ndim == 2:
temperature = 1 / inverse_temperatures[replica_idx, batch_idx]
expected_scale = (
scale_identity_multiplier[batch_idx] * np.sqrt(temperature))
ess = np.min(ess_[replica_idx, batch_idx]) # Conservative estimate.
self.assertGreater(ess, num_results / 10, msg='Bad sampling!')
self.assertAllClose(
replica_mean_[replica_idx, batch_idx],
loc[batch_idx],
# 5 standard errors of a mean estimate.
atol=5 * expected_scale / np.sqrt(ess),
msg=err_msg)
self.assertAllClose(
expected_scale**2 * np.eye(loc.shape[1]),
replica_cov_[replica_idx, batch_idx],
# 10 standard errors of a variance estimate.
atol=10 * np.sqrt(2) * expected_scale**2 / np.sqrt(ess),
msg=err_msg)
for replica_idx in range(num_replica):
for batch_idx in range(loc.shape[0]):
_check_stats(replica_idx, batch_idx)
def test1EventDim2BatchDim3Replica1DTemperatureScalarStep(self):
inverse_temperatures = np.float32([1.0, 0.5, 0.25])
step_size = 0.5
self._checkMVNWithOneBatchDim(inverse_temperatures, step_size)
def test1EventDim2BatchDim3Replica1DTemperature1DStep(self):
inverse_temperatures = np.float32([1.0, 0.5, 0.25])
# We need to pad the step_size so it broadcasts against MCMC samples.
step_size = 0.5 / np.sqrt(inverse_temperatures).reshape(3, 1, 1)
self._checkMVNWithOneBatchDim(inverse_temperatures, step_size)
def test1EventDim2BatchDim3Replica1DTemperature2DStep(self):
inverse_temperatures = np.float32([1.0, 0.5, 0.25])
# We need to pad the step_size so it broadcasts against MCMC samples.
step_size = np.stack([
0.5 / np.sqrt(inverse_temperatures),
0.5 / np.sqrt(inverse_temperatures),
], axis=-1).reshape(3, 2, 1)
self._checkMVNWithOneBatchDim(inverse_temperatures, step_size)
def test1EventDim2BatchDim3Replica2DTemperature1DStep(self):
# Shape [3, 2].
inverse_temperatures = np.float32(
np.stack([[1.0, 0.5, 0.25], [1.0, 0.25, 0.05]], axis=-1))
# We need to pad the step_size so it broadcasts against MCMC samples.
step_size = 0.5 / np.sqrt(inverse_temperatures).reshape(3, 2, 1)
self._checkMVNWithOneBatchDim(inverse_temperatures, step_size)
def test1EventDim2BatchDim3Replica2DTemperature2DStep(self):
# Shape [3, 2].
inverse_temperatures = np.float32(
np.stack([[1.0, 0.5, 0.25], [1.0, 0.25, 0.05]], axis=-1))
# We need to pad the step_size so it broadcasts against MCMC samples.
step_size = 0.5 / np.sqrt(inverse_temperatures).reshape(3, 2, 1)
self._checkMVNWithOneBatchDim(inverse_temperatures, step_size)
def testMultipleCorrelatedStatesWithOneBatchDim(self):
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
# Use LinearOperatorLowerTriangular to get broadcasting ability.
linop = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(true_cov))
num_results = 250 if tf.executing_eagerly() else 2000
def target_log_prob(x, y):
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
xy = tf.stack([x, y], axis=-1) - true_mean
z = linop.solvevec(xy)
return -0.5 * tf.reduce_sum(z**2., axis=-1)
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=[0.75, 0.75],
num_leapfrog_steps=3)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target_log_prob, autograph=False),
inverse_temperatures=[1., 0.9, 0.8],
make_kernel_fn=make_kernel_fn,
seed=_set_seed())
states = tfp.mcmc.sample_chain(
num_results=num_results,
# batch_shape = [4] for each initial state
current_state=[tf.ones(4), tf.ones(4)],
kernel=remc,
num_burnin_steps=400,
trace_fn=None,
parallel_iterations=1) # For determinism.
states = tf.stack(states, axis=-1)
self.assertAllEqual((num_results, 4, 2), states.shape)
states_, ess_, cov_ = self.evaluate([
states,
effective_sample_size(states),
tfp.stats.covariance(states)
])
self.assertGreater(np.min(ess_), num_results / 10, 'Bad sampling found!')
# 5 standard errors for mean/variance estimates.
mean_atol = 5 / np.sqrt(np.min(ess_))
cov_atol = 5 * np.sqrt(2) / np.sqrt(np.min(ess_))
self.assertAllClose(
true_mean, states_[:, 0, :].mean(axis=0), atol=mean_atol)
self.assertAllClose(
true_mean, states_[:, 1, :].mean(axis=0), atol=mean_atol)
self.assertAllClose(true_cov, cov_[0], atol=cov_atol)
self.assertAllClose(true_cov, cov_[1], atol=cov_atol)
def testInversePermutationError(self):
"""Using invalid `inverse_temperatures`."""
dtype = np.float32
def bad_swap_fn(num_replica, batch_shape=(), seed=None): # pylint: disable=unused-argument
return [1, 2, 0]
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tfd.Normal(loc=dtype(0), scale=dtype(1)).log_prob,
inverse_temperatures=dtype([1., 0.5, 0.25]),
make_kernel_fn=lambda tlp, seed: tfp.mcmc.HamiltonianMonteCarlo( # pylint: disable=g-long-lambda
target_log_prob_fn=tlp,
seed=seed,
step_size=1.,
num_leapfrog_steps=3),
# Fun fact: of the six length-3 permutations, only two are not
# "one-time swap" permutations: [1, 2, 0], [2, 0, 1]
swap_proposal_fn=bad_swap_fn,
validate_args=True,
seed=_set_seed())
with self.assertRaisesRegexp(
tf.errors.OpError, 'must be.*self-inverse permutation'):
self.evaluate(tfp.mcmc.sample_chain(
num_results=10,
num_burnin_steps=2,
current_state=[dtype(1)],
kernel=remc,
trace_fn=None,
parallel_iterations=1)) # For determinism.
def testKernelResultsHaveCorrectShapeWhenMultipleStatesAndBatchDims(self):
def target_log_prob(x, y):
xy = tf.concat([x, y], axis=-1)
return -0.5 * tf.reduce_sum(xy**2, axis=-1)
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=[0.3, 0.1],
num_leapfrog_steps=3)
inverse_temperatures = [1., 0.5, 0.25, 0.1]
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target_log_prob, autograph=False),
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed())
num_results = 6
n_batch = 5
n_events = 3
n_states = 2 # Set by target_log_prob.
num_replica = len(inverse_temperatures)
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=[tf.zeros((n_batch, n_events))] * n_states,
kernel=remc,
num_burnin_steps=2,
trace_fn=lambda _, results: results)
self.assertLen(samples, n_states)
self.assertAllEqual((num_results, n_batch, n_events), samples[0].shape)
self.assertAllEqual((num_results, n_batch, n_events), samples[1].shape)
kr_ = self.evaluate(kernel_results)
# Boring checks of existence/shape.
self.assertEqual(
(num_results, num_replica, n_batch, n_states, n_events),
tf.stack(kr_.post_swap_replica_states, axis=-2).shape)
self.assertEqual(
(num_results, num_replica, n_batch),
kr_.pre_swap_replica_results.log_accept_ratio.shape)
self.assertEqual(
(num_results, num_replica, n_batch),
kr_.post_swap_replica_results.log_accept_ratio.shape)
self.assertEqual(
(num_results, num_replica, num_replica, n_batch),
kr_.is_swap_proposed.shape)
self.assertEqual(
(num_results, num_replica, num_replica, n_batch),
kr_.is_swap_accepted.shape)
self.assertEqual(
(num_results, num_replica - 1, n_batch),
kr_.is_swap_proposed_adjacent.shape)
self.assertEqual(
(num_results, num_replica - 1, n_batch),
kr_.is_swap_accepted_adjacent.shape)
self.assertEqual(
(num_results, num_replica),
tf.stack(kr_.inverse_temperatures, axis=1).shape)
self.assertEqual(
(num_results, num_replica, n_batch),
kr_.swaps.shape)
if __name__ == '__main__':
tf.test.main()
avg_line_length: 37.063291 | max_line_length: 105 | alphanum_fraction: 0.664447
hexsha: ad4ee3ebb2dbdb6953e628109cccddc75df8ba2d | size: 3835 | ext: py | lang: Python
repo_path: bdd_text_editor.py | repo_name: shivahari/bdd-text-editor | repo_head_hexsha: b53b2e7d58fd3c912f773424560c4cc320c02a24 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3.7
import json
import re
from tkinter import Tk, ttk, Frame, Entry,\
END, Text, Scrollbar, \
RIGHT, LEFT, X, Y, TOP, BOTTOM,\
filedialog, Listbox, SINGLE, ACTIVE
class Application():
def __init__(self):
"Initialize Application"
self.root = Tk()
# set title
self.root.title('BDD Text Editor')
self.root.attributes('-fullscreen',True)
#set save button
self.save_button()
# set scroll bar
self.set_scroll_bar()
#set run button
self.run_button()
# set text widget
self.text = Text(font=("Helvetica", 18))
#self.text.bind('<Return>', self.auto_complete)
self.text.bind('<space>', self.auto_complete)
self.text.pack(expand=True, fill='both')
# read the steps json
self.steps = self.read_steps_json()
self.root.mainloop()
def set_scroll_bar(self):
"Set a scroll bar to text widget"
scroll_bar = Scrollbar(self.root)
scroll_bar.pack(side=RIGHT, fill=Y)
def save_button(self):
"Save button"
save_button = ttk.Button(self.root, text='Save', command=self.saveas)
save_button.pack(anchor='e', side=BOTTOM)
def saveas(self):
"Save a file"
text = self.text.get('1.0', 'end-1c')
save_location = filedialog.asksaveasfilename()
file = open(save_location, 'w+')
file.write(text)
file.close()
def run_button(self):
"Run the file"
run_button = ttk.Button(self.root, text='Run', command=self.run_file)
run_button.pack(anchor='w', side=BOTTOM)
def run_file(self):
"Run the file"
pass
def auto_complete(self, event):
"Auto complete the text"
        # The suggestion list box may not exist yet on the first keystroke.
        try:
            self.list_box.destroy()
        except Exception:
            pass
#step = self.text.get('1.0', 'end-1c')
self.current_step = self.text.get('end - 1 lines linestart', 'end - 1 lines lineend')
        self.check_sp_char = re.search(r'^\W+', self.current_step)
if self.check_sp_char:
self.current_step = self.current_step.strip(self.check_sp_char.group())
print(self.check_sp_char.group())
if len(self.current_step.split(' ')) >= 2:
self.matching_steps = []
            # Escape the typed text so regex metacharacters cannot break the match.
            re_match = re.compile(re.escape(self.current_step) + '.*')
self.matching_steps = list(filter(re_match.match, self.steps))
if self.matching_steps:
self.list_box = Listbox(self.text, selectmode=SINGLE)
#self.list_box.delete(0, END)
for i in range(0,len(self.matching_steps)):
self.list_box.insert(i+1, self.matching_steps[i])
self.list_box.pack(expand=True, fill=X)
self.list_box.bind('<<ListboxSelect>>', self.on_list_box_select)
    def on_list_box_select(self, event):
        "Actions after selecting a list box item"
selection_index = int(self.list_box.curselection()[0])
# delete the existing line & insert the new line
self.text.delete('current linestart', 'current lineend')
replace_string = self.matching_steps[selection_index]
if self.check_sp_char:
replace_string = self.check_sp_char.group() + replace_string
self.text.insert(END, replace_string)
#self.list_box.delete(0, END)
#print(dir(self.list_box))
self.list_box.destroy()
self.matching_steps = []
def read_steps_json(self):
"Read the steps json file"
with open('steps_catalog.json', 'rb') as steps_file:
steps = json.load(steps_file)
steps = [step['name'] for step in steps]
return steps
app = Application()
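# --- Illustrative note (editorial addition, not part of the original file) ---
# read_steps_json() above assumes a `steps_catalog.json` file containing a list
# of objects that each carry at least a "name" key. The step texts below are
# hypothetical placeholders; a minimal catalog could be created like this:
#
#   import json
#   sample_steps = [
#       {"name": "Given the user is logged in"},
#       {"name": "When the user clicks the submit button"},
#       {"name": "Then the dashboard is displayed"},
#   ]
#   with open('steps_catalog.json', 'w') as f:
#       json.dump(sample_steps, f, indent=2)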
avg_line_length: 31.434426 | max_line_length: 93 | alphanum_fraction: 0.589048
hexsha: ecea089966d4d372133534e02eae02cf0e35598f | size: 693 | ext: py | lang: Python
repo_path: week1/assignment7/7-exercise2.py | repo_name: brian-gpu/assignments | repo_head_hexsha: 77dc8254f256fb329bd7508cfd2cde5c8384e836 | licenses: ["MIT"]
stars: null | issues: null | forks: null
def convert_temperature(temperature):
    # Expects input like "72 F" or "21 C": a number, a space, then the unit letter.
    old_temperature = temperature
    new_temperature = 0
if(old_temperature[-1] == 'F'):
new_temperature = float(old_temperature[0:-2])
new_temperature -= 32
new_temperature *= (5/9)
new_temperature = int(new_temperature)
print(f"{old_temperature} is {new_temperature} in Celsius")
else:
new_temperature = float(old_temperature[0:-2])
new_temperature *= (9/5)
new_temperature += 32
new_temperature = int(new_temperature)
print(f"{old_temperature} is {new_temperature} in Fahrenheit")
temperature = input("Input a temperature:\n")
convert_temperature(temperature)
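# --- Illustrative check (editorial addition, not part of the original exercise) ---
# Using the formulas above: "212 F" -> int((212 - 32) * 5/9) = 100, so
# convert_temperature("212 F") prints "212 F is 100 in Celsius"; likewise
# "100 C" -> int(100 * 9/5 + 32) = 212, printed as "100 C is 212 in Fahrenheit".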
avg_line_length: 33 | max_line_length: 70 | alphanum_fraction: 0.673882
hexsha: c78202fc46877b9199a582ebe8ca99df13a4f1ba | size: 4399 | ext: py | lang: Python
repo_path: benchmarks.py | repo_name: Congyuwang/RocksDict | repo_head_hexsha: bad2f88838e5ba2bd85dc21c7cc6550d02783772 | licenses: ["MIT"]
stars: 12 (2021-12-30 to 2022-03-21) | issues: 6 (2022-01-27 to 2022-03-29) | forks: 3 (2021-11-18 to 2022-03-16)
import os
from subprocess import run
from pathlib import Path
import json
import re
import pandas as pd
import matplotlib.pyplot as plt
BENCH_RESULT_FOLDER = Path("./bench_result")
BENCH_PLOT_FOLDER = Path("./bench_plot")
TOTAL_ROUNDS = 5
RESULT_FILE_NAME = re.compile("bench_(.*?)_n(\\d+)_k(\\d+)_v(\\d+)_r(\\d+)\\.json")
N_K_V = [(10000, 16, 100), (1000, 16, 100000)]
TEST_NAME_DICT = {'test_fill_raw_sequential': 'insert_sequential',
'test_fill_raw_batch_sequential': 'insert_sequential',
'test_read_hot_raw': 'random_read',
'test_delete_sequential_raw': 'delete_sequential',
'test_read_sequential_raw': 'read_sequential',
'test_get_raw_batch_sequential': 'read_sequential'}
def cmd(dbname, num, k_size, v_size, b_size, rounds):
command = f"pytest " \
f"-s --dbname {dbname} " \
f"--num {num} " \
f"--k_size {k_size} " \
f"--v_size {v_size} " \
f"--batch_size {b_size} " \
f"--percent 1.0 " \
f"benchmark.py " \
f"--benchmark-json " \
f"./{BENCH_RESULT_FOLDER}/" \
f"bench_{dbname}_n{num}_k{k_size}_v{v_size}_r{rounds}.json".split()
print(f"ROUND {rounds}:", " ".join(command))
return command
def from_file_name(file_name):
db, num_keys, k_size, v_size, rounds = RESULT_FILE_NAME.findall(file_name)[0]
return {
"db_name": db,
"num_keys": int(num_keys),
"key_size": int(k_size),
"value_size": int(v_size),
"rounds": int(rounds),
}
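# --- Illustrative note (editorial addition) ---
# Example of the filename convention parsed by RESULT_FILE_NAME / from_file_name:
#   "bench_rocks_db_raw_n10000_k16_v100_r3.json" ->
#   {"db_name": "rocks_db_raw", "num_keys": 10000, "key_size": 16,
#    "value_size": 100, "rounds": 3}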
def load_bench_result():
results_files = [f for f in os.listdir(BENCH_RESULT_FOLDER)
if f.startswith("bench") and f.endswith(".json")]
bench_results = []
for r in results_files:
with open(BENCH_RESULT_FOLDER / r, "r") as f:
bench_data = json.load(f)
bench_meta = from_file_name(r)
for group in bench_data["benchmarks"]:
new_row = bench_meta.copy()
new_row.update({"test_name": group["name"],
"mean": group["stats"]["mean"],
"ops": group["stats"]["ops"], })
bench_results.append(new_row)
data_frame = pd.DataFrame(bench_results)
return data_frame
def plot_single_test(test_name: str, num_keys: int,
key_size: int, value_size: int, r_df: pd.DataFrame):
title = f'{TEST_NAME_DICT[test_name]}' \
f'(num_keys={num_keys}, ' \
f'ksize={key_size}, ' \
f'vsize={value_size})'
df_slice = r_df[(r_df['test_name'] == test_name)
& (r_df['num_keys'] == num_keys)
& (r_df['key_size'] == key_size)
& (r_df['value_size'] == value_size)]
df_slice.set_index('db_name', inplace=True)
ops = 1 / df_slice['mean'] * df_slice['num_keys']
ax = ops.plot.bar(title=title, ylabel='ops')
out_title = title.replace(',', '-') + '.png'
plt.xticks(rotation=-20)
fig = ax.get_figure()
fig.savefig(BENCH_PLOT_FOLDER / out_title, pad_inches=0)
plt.show()
def plot_benchmarks(df: pd.DataFrame, num_keys_ksize_vsize_list: list):
result_df = df.groupby(['db_name', 'num_keys',
'key_size', 'value_size',
'test_name'])['mean'].mean().reset_index()
test_names = [n for n in df['test_name'].unique()
if "raw" in n and "batch" not in n]
for n_k_v in num_keys_ksize_vsize_list:
num_keys, ksize, vsize = n_k_v
for test in test_names:
plot_single_test(test, num_keys, ksize, vsize, result_df)
if __name__ == '__main__':
os.chdir("./benchmark")
if not BENCH_RESULT_FOLDER.exists():
os.mkdir(BENCH_RESULT_FOLDER)
if not BENCH_PLOT_FOLDER.exists():
os.mkdir(BENCH_PLOT_FOLDER)
for r in range(TOTAL_ROUNDS):
run(cmd("rocks_db_raw", 10000, 16, 100, 1000, r))
run(cmd("py_vidar_db", 10000, 16, 100, 1000, r))
run(cmd("semi_dbm", 10000, 16, 100, 1000, r))
run(cmd("rocks_db_raw", 1000, 16, 100000, 100, r))
run(cmd("py_vidar_db", 1000, 16, 100000, 100, r))
run(cmd("semi_dbm", 1000, 16, 100000, 100, r))
plot_benchmarks(load_bench_result(), N_K_V)
avg_line_length: 38.587719 | max_line_length: 83 | alphanum_fraction: 0.576949
hexsha: 9f56fcbb5f67280d2327eba0ca3553676c82e8e7 | size: 472 | ext: py | lang: Python
repo_path: data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_dead_log_large_evil_fire_small.py | licenses: ["MIT"]
stars: 20 (2015-02-23 to 2022-03-18) in obi-two/GameServer @ 7d37024e2291a97d49522610cd8f1dbe5666afc2
issues: null in apathyboy/swganh @ 665128efe9154611dec4cb5efc61d246dd095984
forks: 20 (2015-04-04 to 2022-03-24) in apathyboy/swganh @ 665128efe9154611dec4cb5efc61d246dd095984
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_dead_log_large_evil_fire_small.iff"
result.attribute_template_id = -1
result.stfName("lair_n","dead_log")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
avg_line_length: 27.764706 | max_line_length: 101 | alphanum_fraction: 0.739407
hexsha: ad38f31afb585daa5fd2bcc05cd0653e37110b99 | size: 2985 | ext: py | lang: Python
repo_path: mnelab/dialogs/xdf_chunks.py | repo_name: hofaflo/mnelab | repo_head_hexsha: 4501d0becdd161015de592106472cb1ea86da37a | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
# Copyright (c) MNELAB developers
#
# License: BSD (3-clause)
from PySide6.QtCore import Qt, Slot
from PySide6.QtGui import QFont, QStandardItem, QStandardItemModel
from PySide6.QtWidgets import (
QAbstractItemView,
QDialog,
QDialogButtonBox,
QHBoxLayout,
QPlainTextEdit,
QTableView,
QVBoxLayout,
)
def _add_item(item):
tmp = QStandardItem()
tmp.setData(item, Qt.DisplayRole)
return tmp
class XDFChunksDialog(QDialog):
def __init__(self, parent, chunks, fname):
super().__init__(parent)
self.setWindowTitle(f"XDF Chunks ({fname})")
self.chunks = chunks
TAGS = {1: "FileHeader", 2: "StreamHeader", 3: "Samples", 4: "ClockOffset",
5: "Boundary", 6: "StreamFooter"}
self.model = QStandardItemModel()
self.model.setHorizontalHeaderLabels(["#", "Bytes", "Tag", "Stream ID"])
for i, chunk in enumerate(chunks, start=1):
row = []
row.append(_add_item(i))
row.append(_add_item(chunk["nbytes"]))
row.append(_add_item(f"{chunk['tag']} ({TAGS[chunk['tag']]})"))
row.append(_add_item(chunk.get("stream_id", "")))
self.model.appendRow(row)
self.view = QTableView()
self.view.setModel(self.model)
self.view.verticalHeader().setVisible(False)
self.view.horizontalHeader().setStretchLastSection(True)
self.view.setShowGrid(False)
self.view.setSelectionMode(QAbstractItemView.SingleSelection)
self.view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.view.setSortingEnabled(True)
self.view.sortByColumn(0, Qt.AscendingOrder)
self.view.setEditTriggers(QTableView.NoEditTriggers)
self.view.setFixedWidth(450)
self.details = QPlainTextEdit("")
self.details.setFixedWidth(450)
self.details.setReadOnly(True)
self.details.setTabStopDistance(30)
font = QFont()
font.setFamily("monospace")
font.setStyleHint(QFont.Monospace)
self.details.setFont(font)
self.details.setLineWrapMode(QPlainTextEdit.NoWrap)
hbox = QHBoxLayout()
hbox.addWidget(self.view)
hbox.addWidget(self.details)
vbox = QVBoxLayout(self)
vbox.addLayout(hbox)
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok)
vbox.addWidget(self.buttonbox)
self.buttonbox.accepted.connect(self.accept)
self.view.clicked.connect(self._update_details)
self.setFixedSize(980, 650)
self.view.setColumnWidth(0, 70)
self.view.setColumnWidth(1, 80)
self.view.setColumnWidth(2, 150)
self.view.setColumnWidth(3, 70)
@Slot()
def _update_details(self):
selection = self.view.selectionModel()
if selection.hasSelection():
n = int(selection.selectedIndexes()[0].data())
self.details.setPlainText(self.chunks[n - 1].get("content", ""))
| 32.802198 | 83 | 0.646566 |
8ad7ceeade97b77a321ad4b1c3411b8fe095f3f0
| 712 |
py
|
Python
|
netbox/utilities/query_functions.py
|
TheFlyingCorpse/netbox
|
a226f06b1beb575011d783b202d76cb74d3b1f79
|
[
"Apache-2.0"
] | 4,994 |
2019-07-01T13:15:44.000Z
|
2022-03-31T19:55:45.000Z
|
netbox/utilities/query_functions.py
|
emersonfelipesp/netbox
|
fecca5ad83fb6b48a2f15982dfd3242653f105f9
|
[
"Apache-2.0"
] | 4,045 |
2019-07-01T14:24:09.000Z
|
2022-03-31T16:07:39.000Z
|
netbox/utilities/query_functions.py
|
emersonfelipesp/netbox
|
fecca5ad83fb6b48a2f15982dfd3242653f105f9
|
[
"Apache-2.0"
] | 1,225 |
2019-07-01T15:34:03.000Z
|
2022-03-31T16:47:09.000Z
|
from django.contrib.postgres.aggregates import JSONBAgg
from django.db.models import F, Func
class CollateAsChar(Func):
"""
Disregard localization by collating a field as a plain character string. Helpful for ensuring predictable ordering.
"""
function = 'C'
template = '(%(expressions)s) COLLATE "%(function)s"'
class EmptyGroupByJSONBAgg(JSONBAgg):
"""
JSONBAgg is a builtin aggregation function which means it includes the use of a GROUP BY clause.
When used as an annotation for collecting config context data objects, the GROUP BY is
incorrect. This subclass overrides the Django ORM aggregation control to remove the GROUP BY.
"""
contains_aggregate = False
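# --- Illustrative usage sketch (editorial addition; model and field names are hypothetical) ---
# Both helpers are meant to be dropped into ORM queries, for example:
#
#   Device.objects.order_by(CollateAsChar('name'))
#   ConfigContext.objects.annotate(merged=EmptyGroupByJSONBAgg('data'))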
avg_line_length: 35.6 | max_line_length: 119 | alphanum_fraction: 0.740169
hexsha: e927321ad1775c5508fcfcfc476905e105426b40 | size: 1262 | ext: py | lang: Python
repo_path: src/handle_union_key.py | repo_name: bcyxy/simple_spider | repo_head_hexsha: e352da8d7d39d2d6eaf6010edcaac0d52d368f47 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding:utf-8 -*-
import logging
class UnionKeyHandler(object):
def __init__(self):
self.uk_delimiter = "||||"
self.uk_slices_len = 3
def make_union_key(self, spider_key, req_time, sub_key):
# Check parameters.
try:
int(req_time)
except:
logging.warning("Make union_key error. &req_time=%s" %str(req_time))
return
union_key = (
"%s%s%s%s%s"
%(
spider_key, self.uk_delimiter,
req_time, self.uk_delimiter, sub_key
)
)
return union_key
def split_union_key(self, union_key):
uk_slices = union_key.split(self.uk_delimiter)
if len(uk_slices) != self.uk_slices_len:
logging.warning("Split union_key error. &union_key=%s" %union_key)
return
spider_key = uk_slices[0]
try:
req_time = int(uk_slices[1])
except:
logging.warning("Split union_key error. &union_key=%s" %union_key)
return
sub_key = uk_slices[2]
union_key_mult = (spider_key, req_time, sub_key)
return union_key_mult
union_key_handler = UnionKeyHandler()
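# --- Illustrative usage sketch (editorial addition; the key values are made up) ---
if __name__ == "__main__":
    # Round-trip a key through the module-level handler defined above.
    demo_key = union_key_handler.make_union_key("spider_a", 1620000000, "page_1")
    print(demo_key)                                     # spider_a||||1620000000||||page_1
    print(union_key_handler.split_union_key(demo_key))  # ('spider_a', 1620000000, 'page_1')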
avg_line_length: 28.044444 | max_line_length: 80 | alphanum_fraction: 0.55309
hexsha: 79b41a2bd860a59799869dc902cb8008d2e01452 | size: 2809 | ext: py | lang: Python
repo_path: cactus/pools/pool_config.py | repo_name: grayfallstown/cactus-blockchain | repo_head_hexsha: 680d68d0bb7694bd4b99e4906b356e014bca7734 | licenses: ["Apache-2.0"]
stars: 20 (2021-07-16 to 2022-03-20) | issues: 29 (2021-07-17 to 2022-03-29) | forks: 21 (2021-07-17 to 2022-03-15)
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List
from blspy import G1Element
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.util.byte_types import hexstr_to_bytes
from cactus.util.config import load_config, save_config
from cactus.util.streamable import Streamable, streamable
"""
Config example
This is what goes into the user's config file, to communicate between the wallet and the farmer processes.
pool_list:
launcher_id: ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa
authentication_public_key: 970e181ae45435ae696508a78012dc80548c334cf29676ea6ade7049eb9d2b9579cc30cb44c3fd68d35a250cfbc69e29
owner_public_key: 84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5
payout_instructions: c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8
pool_url: localhost
p2_singleton_puzzle_hash: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
target_puzzle_hash: 344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58
""" # noqa
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PoolWalletConfig(Streamable):
launcher_id: bytes32
pool_url: str
payout_instructions: str
target_puzzle_hash: bytes32
p2_singleton_puzzle_hash: bytes32
owner_public_key: G1Element
authentication_public_key: G1Element
def load_pool_config(root_path: Path) -> List[PoolWalletConfig]:
config = load_config(root_path, "config.yaml")
ret_list: List[PoolWalletConfig] = []
if "pool_list" in config["pool"]:
for pool_config_dict in config["pool"]["pool_list"]:
try:
pool_config = PoolWalletConfig(
hexstr_to_bytes(pool_config_dict["launcher_id"]),
pool_config_dict["pool_url"],
pool_config_dict["payout_instructions"],
hexstr_to_bytes(pool_config_dict["target_puzzle_hash"]),
hexstr_to_bytes(pool_config_dict["p2_singleton_puzzle_hash"]),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["owner_public_key"])),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["authentication_public_key"])),
)
ret_list.append(pool_config)
except Exception as e:
log.error(f"Exception loading config: {pool_config_dict} {e}")
return ret_list
async def update_pool_config(root_path: Path, pool_config_list: List[PoolWalletConfig]):
full_config = load_config(root_path, "config.yaml")
full_config["pool"]["pool_list"] = [c.to_json_dict() for c in pool_config_list]
save_config(root_path, "config.yaml", full_config)
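# --- Illustrative usage sketch (editorial addition; the root path is hypothetical) ---
#   from pathlib import Path
#   for cfg in load_pool_config(Path.home() / ".cactus" / "mainnet"):
#       print(cfg.launcher_id.hex(), cfg.pool_url)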
avg_line_length: 41.925373 | max_line_length: 127 | alphanum_fraction: 0.756853
hexsha: a5453e5657059ebcc18cd6dc46c15a4c13e89ea2 | size: 2038 | ext: py | lang: Python
repo_path: ext/iotkit/sdk/tools/virgil-trust-provisioner/virgil_trust_provisioner/generators/keys/interface.py | repo_name: andr13/yiot-yocto-test | repo_head_hexsha: 1a0942318a2fb244c2a5a2ff086be7d0b7ea0deb | licenses: ["BSD-2-Clause"]
stars: 19 (2019-11-20 to 2021-09-30) | issues: 1 (2020-01-27) | forks: 4 (2020-01-04 to 2020-04-19)
from abc import ABC, abstractmethod
from virgil_trust_provisioner import consts
class KeyGeneratorInterface(ABC):
@abstractmethod
def generate(self, *,
signature_limit,
rec_pub_keys,
signer_key,
private_key_base64,
start_date,
expire_date,
meta_data):
pass
@property
@abstractmethod
def ec_type(self):
pass
@property
def ec_type_secmodule(self) -> int:
t = consts.ec_type_vs_to_secmodule_map.get(self.ec_type, None)
if t is None:
            raise ValueError("Can't find SECMODULE EC key type for %s Virgil type" % self.ec_type)
return t.value
@property
@abstractmethod
def hash_type(self):
pass
@property
def hash_type_secmodule(self):
t = consts.hash_type_vs_to_secmodule_map.get(self.hash_type, None)
if t is None:
            raise ValueError("Can't find SECMODULE hash type for %s Virgil hash type" % self.hash_type)
return t
@property
@abstractmethod
def private_key(self):
pass
@property
@abstractmethod
def public_key(self):
pass
@property
@abstractmethod
def signature(self):
pass
@property
@abstractmethod
def key_id(self):
pass
@property
@abstractmethod
def key_type(self):
pass
@property
def key_type_secmodule(self) -> int:
vs_type = consts.VSKeyTypeS(self.key_type)
t = consts.key_type_str_to_num_map.get(vs_type, None)
if t is None:
            raise ValueError("Can't find SECMODULE key type for %s Virgil key type" % self.key_type)
return t.value
@abstractmethod
def sign(self, data, long_sign):
pass
@abstractmethod
def verify(self, data, signature, long_sign):
pass
@abstractmethod
def encrypt(self, data):
pass
@abstractmethod
def decrypt(self, data):
pass
avg_line_length: 22.395604 | max_line_length: 103 | alphanum_fraction: 0.603533
hexsha: fb343ac9805ea67db78851a3f6b9ed40fa0a2e9b | size: 5264 | ext: py | lang: Python
repo_path: detection/LED_noise.py | repo_name: Cornell-iGEM/iGEM-Detection | repo_head_hexsha: 6bedd8d3cd9a8e316fd744aeb515bc0b4f393a9a | licenses: ["MIT"]
stars: null | issues: 1 (2017-10-20) | forks: null
import cv2
import numpy as np
import signal, os, subprocess, sys
import time
import threading
import requests
import io
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
from fractions import Fraction
import csv
def integral(x1, x2, y1, y2, table):
    # Cast to int: callers pass float bounds derived from cv2.minEnclosingCircle.
    x1, x2, y1, y2 = int(x1), int(x2), int(y1), int(y2)
    return table[y1][x1][0] + table[y2][x2][0] - table[y1][x2][0] - table[y2][x1][0]
#pin numbers on pi for LEDs
excite_low_pin = 18
GPIO.setup( excite_low_pin, GPIO.OUT)
excite_high_pin = 23
GPIO.setup( excite_high_pin, GPIO.OUT)
pdawn_pin = 20
GPIO.setup( pdawn_pin, GPIO.OUT)
camera = PiCamera()
camera.framerate = 32
#camera.framerate = Fraction(1,6)
raw_capture = PiRGBArray(camera)
output = PiRGBArray(camera)
time.sleep(0.1)
"""
#g = camera.awb_gains
g = (Fraction(1, 1), Fraction(1,1))
print g
camera.exposure_mode = 'off'
camera.shutter_speed = 500000
camera.awb_mode = 'off'
camera.awb_gains = g
camera.capture(output, format="bgr")
img = output.array
b,g,r = cv2.split(img)
cv2.imshow('frame',g)
key = cv2.waitKey(0) & 0xFF
"""
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(5,4), Fraction(4,3))
#camera.shutter_speed = 32000 #for darker environments
camera.shutter_speed = 3200*3 #light testing
#pwm = GPIO.PWM(18, 100)
#pwm.start(1)
redLower = np.array((0,50, 150))
redUpper = np.array((330, 255,255))
def brightnessvalue(frame, redLower, redUpper):
#Avisha: ball tracking
#print('block test 2')
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
#cv2.imshow('gr', frame)
#key = cv2.waitKey(0) & 0xFF
#construct mask, dilations and erosions to remove noise
mask = cv2.inRange(hsv, redLower, redUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
#find contours in the mask, initialize current center (x,y)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
b,g,r = cv2.split(frame)
b = cv2.bitwise_and(b, mask)
g = cv2.bitwise_and(g, mask)
r = cv2.bitwise_and(r, mask)
frame = cv2.merge((b,g,r))
averagemask = cv2.mean(frame, mask= mask)
integral_table = cv2.integral(frame)
image_y = int(frame.shape[0])
image_x = int(frame.shape[1])
#cv2.imshow('gr', frame)
#key = cv2.waitKey(0) & 0xFF
#only proceed if at least one contour was found
if len (cnts) > 0:
#find largest contour, use it to compute min enclosed cirlce
#and centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
bounds = max(0, x -radius), min(image_x-1, x + radius), max(0, y - radius), min(image_y-1, y + radius)
#print(bounds)
img_integral = integral(bounds[0], bounds[1], bounds[2], bounds[3], integral_table)
#img_integral = integral(0, image_x, 0, image_y, integral_table)
area = (bounds[1] - bounds[0]) * (bounds[3] - bounds[2])
#print(img_integral/area)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
#proceed if radius is min size --NEED TO FIGURE OUT
#if radius > 1:
#draw the circle and centroid on the frame,
#then update the list of tracked points
# cv2.circle(frame, (int(x), int(y)), int(radius),
# (0, 255, 255), 2)
# cv2.circle(frame, center, 5, (0, 0, 255), -1)
return img_integral/area
# show the frame to our screen
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#gray = frame
return 0
csvfile = open('LED.csv', 'wb')
try:
#make function which takes in frame, lower and uppper bound for hue saturation value, return integral
fieldnames = ['emission1', 'emission2', 'time']
csvwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
csvwriter.writeheader()
while True:
#response = raw_input("ledsample")
#if response == "q":
# break
#print('block test 1')
#low excitation
GPIO.output( excite_low_pin, GPIO.HIGH)
time.sleep(0.1)
camera.capture(raw_capture, format='bgr')
frame = raw_capture.array
x = brightnessvalue(frame, redLower, redUpper)
GPIO.output( excite_low_pin, GPIO.LOW)
raw_capture.truncate(0)
#high excitation
#take new picture
GPIO.output( excite_high_pin, GPIO.HIGH)
time.sleep(0.1)
camera.capture(raw_capture, format='bgr')
frame = raw_capture.array
y = brightnessvalue(frame, redLower, redUpper)
GPIO.output( excite_high_pin, GPIO.LOW)
raw_capture.truncate(0)
if x != 0 and y != 0:
ratio = x/y
else:
ratio = -1
data = {"emission1": x, "emission2": y, "time": time.ctime()}
csvwriter.writerow(data)
csvfile.flush()
#url = 'http://citronnade.mooo.com/rfp'
print(data)
# requests.post(url, data=data)
finally:
cv2.destroyAllWindows()
camera.close()
#pwm.stop()
GPIO.cleanup()
csvfile.close()
avg_line_length: 30.604651 | max_line_length: 110 | alphanum_fraction: 0.621771
hexsha: bc1e1390d6434b724e79f6e81a0778ebd458ab3f | size: 331 | ext: py | lang: Python
repo_path: src/databricksbundle/notebook/path/DatabricksNotebookPathResolver.py | repo_name: chlubnamarek/databricks-bundle | repo_head_hexsha: ba3da5ba5e25046e7010fe505e0021738708f525 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from pathlib import Path
from databricksbundle.notebook.helpers import getNotebookPath
from databricksbundle.notebook.path.NotebookPathResolverInterface import NotebookPathResolverInterface
class DatabricksNotebookPathResolver(NotebookPathResolverInterface):
def resolve(self) -> Path:
return Path(getNotebookPath())
avg_line_length: 36.777778 | max_line_length: 102 | alphanum_fraction: 0.845921
hexsha: 50cd2c44652121e41cfb1322899051eb9b1a6e24 | size: 280 | ext: py | lang: Python
repo_path: examples/spot/stream/isolated_margin/renew_isolated_margin_listen_key.py | repo_name: Banging12/binance-connector-python | repo_head_hexsha: dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | licenses: ["MIT"]
stars: 512 (2021-06-15 to 2022-03-31) | issues: 75 (2021-06-20 to 2022-03-30) | forks: 156 (2021-06-18 to 2022-03-29)
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
client = Client(key)
logging.info(client.renew_isolated_margin_listen_key(symbol="BTCUSDT", listenKey=""))
avg_line_length: 23.333333 | max_line_length: 85 | alphanum_fraction: 0.792857
hexsha: ea14ff063ff42ab1b9071c47a5ba9503b7a3e4c3 | size: 3044 | ext: py | lang: Python
repo_path: zeus/model_zoo/model_zoo.py | licenses: ["MIT"]
stars: 6 (2020-11-13 to 2021-12-02) in wnov/vega @ bf51cbe389d41033c4ae4bc02e5078c3c247c845
issues: null in JacobLee121/vega @ 19256aca4d047bfad3b461f0a927e1c2abb9eb03
forks: 2 (2021-06-25 to 2021-08-06) in JacobLee121/vega @ 19256aca4d047bfad3b461f0a927e1c2abb9eb03
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Model zoo."""
import zeus
import logging
import os
from zeus.networks.network_desc import NetworkDesc
from zeus.common.general import General
class ModelZoo(object):
"""Model zoo."""
@classmethod
def set_location(cls, location):
"""Set model zoo location.
:param location: model zoo location.
:type localtion: str.
"""
General.model_zoo.model_zoo_path = location
@classmethod
    def get_model(cls, model_desc=None, pretrained_model_file=None):
        """Get model from model zoo.
        :param model_desc: the description of network.
        :type model_desc: dict or None.
        :param pretrained_model_file: path of the pretrained weights file.
        :type pretrained_model_file: str.
        :return: model.
        :rtype: model.
        """
try:
network = NetworkDesc(model_desc)
model = network.to_model()
except Exception as e:
logging.error("Failed to get model, model_desc={}, msg={}".format(
model_desc, str(e)))
raise e
logging.info("Model was created.")
if zeus.is_torch_backend() and pretrained_model_file:
model = cls._load_pretrained_model(model, pretrained_model_file)
elif zeus.is_ms_backend() and pretrained_model_file:
model = cls._load_pretrained_model(model, pretrained_model_file)
return model
@classmethod
def _load_pretrained_model(cls, model, pretrained_model_file):
if zeus.is_torch_backend():
import torch
            if not os.path.isfile(pretrained_model_file):
                raise FileNotFoundError(
                    "Pretrained model does not exist, model={}".format(pretrained_model_file))
logging.info("load model weights from file, weights file={}".format(pretrained_model_file))
checkpoint = torch.load(pretrained_model_file)
model.load_state_dict(checkpoint)
elif zeus.is_ms_backend():
from mindspore.train.serialization import load_checkpoint
load_checkpoint(pretrained_model_file, net=model)
return model
@classmethod
def select_compressed_models(cls, model_zoo_file, standard, num):
"""Select compressed model by model filter."""
from zeus.model_zoo.compressed_model_filter import CompressedModelFilter
model_filter = CompressedModelFilter(model_zoo_file)
model_desc_list = model_filter.select_satisfied_model(standard, num)
return model_desc_list
avg_line_length: 37.580247 | max_line_length: 103 | alphanum_fraction: 0.677727
hexsha: 420ee27ba734b063f99b78a7b53e3a81c5afcc8f | size: 1830 | ext: py | lang: Python
repo_path: dags/p2p_setup.py | repo_name: ezubatov-uipath/airflow_test_dags | repo_head_hexsha: e997f5c2ae956c6e3bdd36220bd6c9cf580da441 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from datetime import timedelta
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator, PythonVirtualenvOperator
#from airflow.providers.snowflake.operators.snowflake import SnowflakeOperator
import os
import uuid
#from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
from airflow.utils.dates import days_ago
from airflow.contrib.hooks.wasb_hook import WasbHook
args = {
'owner': '[email protected]',
}
with DAG(
dag_id='p2p_setup_1',
default_args=args,
schedule_interval=None,
start_date=days_ago(2),
dagrun_timeout=timedelta(minutes=60),
tags=['p2p', 'setup'],
params={},
) as dag:
title_task = BashOperator(
task_id='show_title',
bash_command='echo "Running p2p setup script"',
)
def print_context(ds, **kwargs):
"""Print the Airflow context and ds variable from the context."""
print(kwargs)
print(ds)
return 'Whatever you return gets printed in the logs'
print_context_task = PythonOperator(
task_id='print_the_context',
python_callable=print_context,
)
title_task >> print_context_task
def create_container(ds, **kwargs):
hook = WasbHook('pmc_wasb')
print(hook.get_connection('pmc_wasb').get_uri)
container_name = kwargs['dag_run'].conf.get('application_id') #"{{ dag_run.conf['application_id'] }}"
print(f"creating container: {container_name}")
container = hook.create_container(container_name)
return
create_container_task = PythonOperator(
task_id='create_container',
python_callable=create_container,
)
print_context_task >> create_container_task
avg_line_length: 31.016949 | max_line_length: 109 | alphanum_fraction: 0.716393
hexsha: c99e539b9a6942e390235f5b43e6d2d0e8f3601a | size: 48768 | ext: py | lang: Python
repo_path: feconf.py | repo_name: UJJWAL-1711/oppia | repo_head_hexsha: 025d4b27df14d21a21c679bfe6e9b517fde7a1d0 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stores various configuration options and constants for Oppia."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import datetime
import os
from constants import constants
# The datastore model ID for the list of featured activity references. This
# value should not be changed.
ACTIVITY_REFERENCE_LIST_FEATURED = 'featured'
ALL_ACTIVITY_REFERENCE_LIST_TYPES = [ACTIVITY_REFERENCE_LIST_FEATURED]
# The values which a post_commit_status can have: public, private.
POST_COMMIT_STATUS_PUBLIC = 'public'
POST_COMMIT_STATUS_PRIVATE = 'private'
# Whether to unconditionally log info messages.
DEBUG = False
# When DEV_MODE is true check that we are running in development environment.
# The SERVER_SOFTWARE environment variable does not exist in Travis, hence the
# need for an explicit check.
if (constants.DEV_MODE and os.getenv('SERVER_SOFTWARE') and
not os.getenv('SERVER_SOFTWARE', default='').startswith('Development')):
raise Exception('DEV_MODE can\'t be true on production.')
CLASSIFIERS_DIR = os.path.join('extensions', 'classifiers')
TESTS_DATA_DIR = os.path.join('core', 'tests', 'data')
SAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')
SAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections')
CONTENT_VALIDATION_DIR = os.path.join('core', 'domain')
# backend_prod_files contains processed JS and HTML files that are served by
# Jinja; since we are moving away from Jinja, this folder might not be needed
# later (#6964).
EXTENSIONS_DIR_PREFIX = (
'backend_prod_files' if not constants.DEV_MODE else '')
ACTIONS_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions'))
ISSUES_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues'))
INTERACTIONS_DIR = (
os.path.join('extensions', 'interactions'))
INTERACTIONS_LEGACY_SPECS_FILE_DIR = (
os.path.join(INTERACTIONS_DIR, 'legacy_interaction_specs'))
INTERACTIONS_SPECS_FILE_PATH = (
os.path.join(INTERACTIONS_DIR, 'interaction_specs.json'))
RTE_EXTENSIONS_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'rich_text_components'))
RTE_EXTENSIONS_DEFINITIONS_PATH = (
os.path.join('assets', 'rich_text_components_definitions.ts'))
OBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')
# Choose production templates folder when we are in production mode.
FRONTEND_TEMPLATES_DIR = (
os.path.join('webpack_bundles') if constants.DEV_MODE else
os.path.join('backend_prod_files', 'webpack_bundles'))
DEPENDENCIES_TEMPLATES_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies'))
VALUE_GENERATORS_DIR_FOR_JS = os.path.join(
'local_compiled_js', 'extensions', 'value_generators')
VALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')
VISUALIZATIONS_DIR = os.path.join(
'extensions', 'visualizations')
VISUALIZATIONS_DIR_FOR_JS = os.path.join(
'local_compiled_js', 'extensions', 'visualizations')
OBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join(
'extensions', 'objects', 'object_defaults.json')
RULES_DESCRIPTIONS_FILE_PATH = os.path.join(
os.getcwd(), 'extensions', 'interactions', 'rule_templates.json')
HTML_FIELD_TYPES_TO_RULE_SPECS_FILE_PATH = os.path.join(
os.getcwd(), 'extensions', 'interactions',
'html_field_types_to_rule_specs.json')
# A mapping of interaction ids to classifier properties.
# TODO(#10217): As of now we support only one algorithm per interaction.
# However, we do have the necessary storage infrastructure to support multiple
# algorithms per interaction. Hence, whenever we find a secondary algorithm
# candidate for any of the supported interactions, the logical functions to
# support multiple algorithms need to be implemented.
INTERACTION_CLASSIFIER_MAPPING = {
'TextInput': {
'algorithm_id': 'TextClassifier',
'algorithm_version': 1
},
}
# Classifier job time to live (in mins).
CLASSIFIER_JOB_TTL_MINS = 5
TRAINING_JOB_STATUS_COMPLETE = 'COMPLETE'
TRAINING_JOB_STATUS_FAILED = 'FAILED'
TRAINING_JOB_STATUS_NEW = 'NEW'
TRAINING_JOB_STATUS_PENDING = 'PENDING'
ALLOWED_TRAINING_JOB_STATUSES = [
TRAINING_JOB_STATUS_COMPLETE,
TRAINING_JOB_STATUS_FAILED,
TRAINING_JOB_STATUS_NEW,
TRAINING_JOB_STATUS_PENDING
]
# Allowed formats of how HTML is present in rule specs.
HTML_RULE_VARIABLE_FORMAT_SET = 'set'
HTML_RULE_VARIABLE_FORMAT_STRING = 'string'
HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS = 'listOfSets'
ALLOWED_HTML_RULE_VARIABLE_FORMATS = [
HTML_RULE_VARIABLE_FORMAT_SET,
HTML_RULE_VARIABLE_FORMAT_STRING,
HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS
]
ANSWER_TYPE_LIST_OF_SETS_OF_HTML = 'ListOfSetsOfHtmlStrings'
ANSWER_TYPE_SET_OF_HTML = 'SetOfHtmlString'
# The maximum number of characters allowed for userbio length.
MAX_BIO_LENGTH_IN_CHARS = 2000
ALLOWED_TRAINING_JOB_STATUS_CHANGES = {
TRAINING_JOB_STATUS_COMPLETE: [],
TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING],
TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE,
TRAINING_JOB_STATUS_FAILED],
TRAINING_JOB_STATUS_FAILED: [TRAINING_JOB_STATUS_NEW]
}
ENTITY_TYPE_EXPLORATION = 'exploration'
ENTITY_TYPE_TOPIC = 'topic'
ENTITY_TYPE_SKILL = 'skill'
ENTITY_TYPE_STORY = 'story'
ENTITY_TYPE_QUESTION = 'question'
ENTITY_TYPE_VOICEOVER_APPLICATION = 'voiceover_application'
IMAGE_CONTEXT_QUESTION_SUGGESTIONS = 'question_suggestions'
IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS = 'exploration_suggestions'
MAX_TASK_MODELS_PER_FETCH = 25
MAX_TASK_MODELS_PER_HISTORY_PAGE = 10
# The maximum number of activities allowed in the playlist of the learner. This
# limit applies to both the explorations playlist and the collections playlist.
MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = 10
# The minimum number of training samples required for training a classifier.
MIN_TOTAL_TRAINING_EXAMPLES = 50
# The minimum number of assigned labels required for training a classifier.
MIN_ASSIGNED_LABELS = 2
# Default label for classification algorithms.
DEFAULT_CLASSIFIER_LABEL = '_default'
# The maximum number of results to retrieve in a datastore query.
DEFAULT_QUERY_LIMIT = 1000
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library/top_rated page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library/recently_published page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20
# The current version of the dashboard stats blob schema. If any backward-
# incompatible changes are made to the stats blob schema in the data store,
# this version number must be changed.
CURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1
# The current version of the exploration states blob schema. If any backward-
# incompatible changes are made to the states blob schema in the data store,
# this version number must be changed and the exploration migration job
# executed.
CURRENT_STATE_SCHEMA_VERSION = 38
# The current version of the all collection blob schemas (such as the nodes
# structure within the Collection domain object). If any backward-incompatible
# changes are made to any of the blob schemas in the data store, this version
# number must be changed.
CURRENT_COLLECTION_SCHEMA_VERSION = 6
# The current version of story contents dict in the story schema.
CURRENT_STORY_CONTENTS_SCHEMA_VERSION = 4
# The current version of skill contents dict in the skill schema.
CURRENT_SKILL_CONTENTS_SCHEMA_VERSION = 2
# The current version of misconceptions dict in the skill schema.
CURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 3
# The current version of rubric dict in the skill schema.
CURRENT_RUBRIC_SCHEMA_VERSION = 3
# The current version of subtopics dict in the topic schema.
CURRENT_SUBTOPIC_SCHEMA_VERSION = 3
# The current version of story reference dict in the topic schema.
CURRENT_STORY_REFERENCE_SCHEMA_VERSION = 1
# The current version of page_contents dict in the subtopic page schema.
CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION = 2
# This value should be updated in the event of any
# StateAnswersModel.submitted_answer_list schema change.
CURRENT_STATE_ANSWERS_SCHEMA_VERSION = 1
# This value should be updated if the schema of LearnerAnswerInfo
# dict schema changes.
CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION = 1
# This value should be updated if the schema of PlatformParameterRule dict
# schema changes.
CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION = 1
# The default number of exploration tiles to load at a time in the search
# results page.
SEARCH_RESULTS_PAGE_SIZE = 20
# The default number of commits to show on a page in the exploration history
# tab.
COMMIT_LIST_PAGE_SIZE = 50
# The default number of items to show on a page in the exploration feedback
# tab.
FEEDBACK_TAB_PAGE_SIZE = 20
# The default number of opportunities to show on contributor dashboard page.
OPPORTUNITIES_PAGE_SIZE = 20
# The maximum number of top unresolved answers which should be aggregated
# from all of the submitted answers.
TOP_UNRESOLVED_ANSWERS_LIMIT = 20
# Default title for a newly-minted exploration.
DEFAULT_EXPLORATION_TITLE = ''
# Default category for a newly-minted exploration.
DEFAULT_EXPLORATION_CATEGORY = ''
# Default objective for a newly-minted exploration.
DEFAULT_EXPLORATION_OBJECTIVE = ''
# NOTE TO DEVELOPERS: If any of the 5 constants below are modified, the
# corresponding field in NEW_STATE_TEMPLATE in constants.js also has to be
# modified.
# Default name for the initial state of an exploration.
DEFAULT_INIT_STATE_NAME = 'Introduction'
# Default content id for the state's content.
DEFAULT_NEW_STATE_CONTENT_ID = 'content'
# Default content id for the interaction's default outcome.
DEFAULT_OUTCOME_CONTENT_ID = 'default_outcome'
# Default content id for the explanation in the concept card of a skill.
DEFAULT_EXPLANATION_CONTENT_ID = 'explanation'
# Default recorded_voiceovers dict for a default state template.
DEFAULT_RECORDED_VOICEOVERS = {
'voiceovers_mapping': {
'content': {},
'default_outcome': {}
}
}
# Default written_translations dict for a default state template.
DEFAULT_WRITTEN_TRANSLATIONS = {
'translations_mapping': {
'content': {},
'default_outcome': {}
}
}
# The default content text for the initial state of an exploration.
DEFAULT_INIT_STATE_CONTENT_STR = ''
# Whether new explorations should have automatic text-to-speech enabled
# by default.
DEFAULT_AUTO_TTS_ENABLED = True
# Default title for a newly-minted collection.
DEFAULT_COLLECTION_TITLE = ''
# Default category for a newly-minted collection.
DEFAULT_COLLECTION_CATEGORY = ''
# Default objective for a newly-minted collection.
DEFAULT_COLLECTION_OBJECTIVE = ''
# Default description for a newly-minted story.
DEFAULT_STORY_DESCRIPTION = ''
# Default notes for a newly-minted story.
DEFAULT_STORY_NOTES = ''
# Default explanation for a newly-minted skill.
DEFAULT_SKILL_EXPLANATION = ''
# Default name for a newly-minted misconception.
DEFAULT_MISCONCEPTION_NAME = ''
# Default notes for a newly-minted misconception.
DEFAULT_MISCONCEPTION_NOTES = ''
# Default feedback for a newly-minted misconception.
DEFAULT_MISCONCEPTION_FEEDBACK = ''
# Default content_id for explanation subtitled html.
DEFAULT_SKILL_EXPLANATION_CONTENT_ID = 'explanation'
# Default description for a newly-minted topic.
DEFAULT_TOPIC_DESCRIPTION = ''
# Default abbreviated name for a newly-minted topic.
DEFAULT_ABBREVIATED_TOPIC_NAME = ''
# Default content id for the subtopic page's content.
DEFAULT_SUBTOPIC_PAGE_CONTENT_ID = 'content'
# Default ID of VM which is used for training classifier.
DEFAULT_VM_ID = 'vm_default'
# Shared secret key for default VM.
DEFAULT_VM_SHARED_SECRET = '1a2b3c4e'
IMAGE_FORMAT_JPEG = 'jpeg'
IMAGE_FORMAT_PNG = 'png'
IMAGE_FORMAT_GIF = 'gif'
IMAGE_FORMAT_SVG = 'svg'
# An array containing the accepted image formats (as determined by the imghdr
# module) and the corresponding allowed extensions in the filenames of uploaded
# images.
ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {
IMAGE_FORMAT_JPEG: ['jpg', 'jpeg'],
IMAGE_FORMAT_PNG: ['png'],
IMAGE_FORMAT_GIF: ['gif'],
IMAGE_FORMAT_SVG: ['svg']
}
# An array containing the image formats that can be compressed.
COMPRESSIBLE_IMAGE_FORMATS = [
IMAGE_FORMAT_JPEG, IMAGE_FORMAT_PNG, IMAGE_FORMAT_GIF]
# An array containing the accepted audio extensions for uploaded files and
# the corresponding MIME types.
ACCEPTED_AUDIO_EXTENSIONS = {
'mp3': ['audio/mp3']
}
# Prefix for data sent from the server to the client via JSON.
XSSI_PREFIX = ')]}\'\n'
# A regular expression for alphanumeric characters.
ALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'
# These are here rather than in rating_services.py to avoid import
# circularities with exp_services.
# TODO(Jacob): Refactor exp_services to remove this problem.
_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}
def get_empty_ratings():
"""Returns a copy of the empty ratings object.
Returns:
dict. Copy of the '_EMPTY_RATINGS' dict object which contains the empty
ratings.
"""
return copy.deepcopy(_EMPTY_RATINGS)
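# Note (editorial addition): get_empty_ratings() deep-copies _EMPTY_RATINGS, so
# callers can mutate the result safely, e.g.:
#   ratings = get_empty_ratings()
#   ratings['5'] += 1   # -> {'1': 0, '2': 0, '3': 0, '4': 0, '5': 1}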
# Empty scaled average rating as a float.
EMPTY_SCALED_AVERAGE_RATING = 0.0
# To use mailgun email service.
EMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service'
# Use GAE email service by default.
EMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_MAILGUN
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun API key.
MAILGUN_API_KEY = None
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun domain name (ending with mailgun.org).
MAILGUN_DOMAIN_NAME = None
# Replace this with the correct Redis Host and Port when switching to prod
# server. Keep this in sync with redis.conf in the root folder. Specifically,
# REDISPORT should always be the same as the port in redis.conf.
REDISHOST = 'localhost'
REDISPORT = 6379
# Committer id for system actions. The username for the system committer
# (i.e. admin) is also 'admin'.
SYSTEM_COMMITTER_ID = 'admin'
# Domain name for email address.
INCOMING_EMAILS_DOMAIN_NAME = 'example.com'
SYSTEM_EMAIL_ADDRESS = '[email protected]'
SYSTEM_EMAIL_NAME = '.'
ADMIN_EMAIL_ADDRESS = '[email protected]'
NOREPLY_EMAIL_ADDRESS = '[email protected]'
# Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and
# correspond to owners of the app before setting this to True. If
# SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this
# address cannot be sent. If True then emails can be sent to any user.
CAN_SEND_EMAILS = False
# If you want to turn on this facility please check the email templates in the
# send_role_notification_email() function in email_manager.py and modify them
# accordingly.
CAN_SEND_EDITOR_ROLE_EMAILS = False
# If enabled then emails will be sent to creators for feedback messages.
CAN_SEND_FEEDBACK_MESSAGE_EMAILS = False
# If enabled subscription emails will be sent to that user.
CAN_SEND_SUBSCRIPTION_EMAILS = False
# Time to wait before sending feedback message emails (currently set to 1
# hour).
DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600
# Whether to send an email when new feedback message is received for
# an exploration.
DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True
# Whether to send an email to all the creator's subscribers when he/she
# publishes an exploration.
DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE = True
# Whether exploration feedback emails are muted,
# when the user has not specified a preference.
DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE = False
# Whether exploration suggestion emails are muted,
# when the user has not specified a preference.
DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE = False
# Whether to send email updates to a user who has not specified a preference.
DEFAULT_EMAIL_UPDATES_PREFERENCE = False
# Whether to send an invitation email when the user is granted
# new role permissions in an exploration.
DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True
# Whether to require an email to be sent, following a moderator action.
REQUIRE_EMAIL_ON_MODERATOR_ACTION = False
# Timespan in minutes before allowing duplicate emails.
DUPLICATE_EMAIL_INTERVAL_MINS = 2
# Number of digits after decimal to which the average ratings value in the
# dashboard is rounded off to.
AVERAGE_RATINGS_DASHBOARD_PRECISION = 2
# Whether to enable maintenance mode on the site. For non-admins, this redirects
# all HTTP requests to the maintenance page. This is the only check which
# determines whether the site is in maintenance mode to avoid queries to the
# database by non-admins.
ENABLE_MAINTENANCE_MODE = False
# The interactions permissible for a question.
ALLOWED_QUESTION_INTERACTION_IDS = [
'TextInput', 'MultipleChoiceInput', 'NumericInput']
# Flag to disable sending emails related to reviews for suggestions. To be
# flipped after deciding (and implementing) whether a user should be scored
# only for curated lessons.
SEND_SUGGESTION_REVIEW_RELATED_EMAILS = False
# To prevent recording scores for users until details like whether to score
# users for only curated lessons is confirmed.
ENABLE_RECORDING_OF_SCORES = False
# No. of pretest questions to display.
NUM_PRETEST_QUESTIONS = 3
# Maximum allowed commit message length for SnapshotMetadata models.
MAX_COMMIT_MESSAGE_LENGTH = 1000
EMAIL_INTENT_SIGNUP = 'signup'
EMAIL_INTENT_DAILY_BATCH = 'daily_batch'
EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification'
EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification'
EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION = 'subscription_notification'
EMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification'
EMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content'
EMAIL_INTENT_MARKETING = 'marketing'
EMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
EMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration'
EMAIL_INTENT_QUERY_STATUS_NOTIFICATION = 'query_status_notification'
EMAIL_INTENT_ONBOARD_REVIEWER = 'onboard_reviewer'
EMAIL_INTENT_REMOVE_REVIEWER = 'remove_reviewer'
EMAIL_INTENT_REVIEW_SUGGESTIONS = 'review_suggestions'
EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES = 'voiceover_application_updates'
EMAIL_INTENT_ACCOUNT_DELETED = 'account_deleted'
# Possible intents for email sent in bulk.
BULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing'
BULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration'
BULK_EMAIL_INTENT_CREATE_EXPLORATION = 'bulk_email_create_exploration'
BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT = 'bulk_email_creator_reengagement'
BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT = 'bulk_email_learner_reengagement'
BULK_EMAIL_INTENT_TEST = 'bulk_email_test'
MESSAGE_TYPE_FEEDBACK = 'feedback'
MESSAGE_TYPE_SUGGESTION = 'suggestion'
MODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
DEFAULT_SALUTATION_HTML_FN = (
lambda recipient_username: 'Hi %s,' % recipient_username)
DEFAULT_SIGNOFF_HTML_FN = (
lambda sender_username: (
'Thanks!<br>%s (Oppia moderator)' % sender_username))
VALID_MODERATOR_ACTIONS = {
MODERATOR_ACTION_UNPUBLISH_EXPLORATION: {
'email_config': 'unpublish_exploration_email_html_body',
'email_subject_fn': (
lambda exp_title: (
'Your Oppia exploration "%s" has been unpublished' % exp_title)
),
'email_intent': 'unpublish_exploration',
'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,
'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,
},
}
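# For illustration only: a sketch of how the callables above are expected to
# be combined by the email-sending code. The concrete arguments are made up.
#
#     action = VALID_MODERATOR_ACTIONS[MODERATOR_ACTION_UNPUBLISH_EXPLORATION]
#     action['email_subject_fn']('My exploration')
#     # -> 'Your Oppia exploration "My exploration" has been unpublished'
#     action['email_salutation_html_fn']('learner123')
#     # -> 'Hi learner123,'
#     action['email_signoff_html_fn']('moderator42')
#     # -> 'Thanks!<br>moderator42 (Oppia moderator)'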
# When the site terms were last updated, in UTC.
REGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0)
# Format of string for dashboard statistics logs.
# NOTE TO DEVELOPERS: This format should not be changed, since it is used in
# the existing storage models for UserStatsModel.
DASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d'
# We generate images for existing math rich text components in batches. This
# gives the maximum size for a batch of Math SVGs in bytes.
MAX_SIZE_OF_MATH_SVGS_BATCH_BYTES = 31 * 1024 * 1024
# The maximum size of an uploaded file, in bytes.
MAX_FILE_SIZE_BYTES = 1048576
# The maximum playback length of an audio file, in seconds.
MAX_AUDIO_FILE_LENGTH_SEC = 300
# The maximum number of questions to be fetched at one time.
MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME = 20
# The minimum score required for a user to review suggestions of a particular
# category.
MINIMUM_SCORE_REQUIRED_TO_REVIEW = 10
# The maximum number of skills to be requested at one time when fetching
# questions.
MAX_NUMBER_OF_SKILL_IDS = 20
# The prefix for an 'accepted suggestion' commit message.
COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by'
# User id and username for exploration migration bot. Commits made by this bot
# are not reflected in the exploration summary models, but are recorded in the
# exploration commit log.
MIGRATION_BOT_USER_ID = 'OppiaMigrationBot'
MIGRATION_BOT_USERNAME = 'OppiaMigrationBot'
# User id and username for suggestion bot. This bot will be used to accept
# suggestions automatically after a threshold time.
SUGGESTION_BOT_USER_ID = 'OppiaSuggestionBot'
SUGGESTION_BOT_USERNAME = 'OppiaSuggestionBot'
# The system usernames are reserved usernames. Before adding new value to this
# dict, make sure that there aren't any similar usernames in the datastore.
# Note: All bot user IDs and usernames should start with "Oppia" and end with
# "Bot".
SYSTEM_USERS = {
SYSTEM_COMMITTER_ID: SYSTEM_COMMITTER_ID,
MIGRATION_BOT_USER_ID: MIGRATION_BOT_USERNAME,
SUGGESTION_BOT_USER_ID: SUGGESTION_BOT_USERNAME
}
# Ids and locations of the permitted extensions.
ALLOWED_RTE_EXTENSIONS = {
'Collapsible': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')
},
'Image': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')
},
'Link': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')
},
'Math': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')
},
'Svgdiagram': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'svgdiagram')
},
'Tabs': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')
},
'Video': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')
},
}
# The list of interaction IDs which correspond to interactions that set their
# is_linear property to true. Linear interactions do not support branching and
# thus only allow for default answer classification. This value is guarded by a
# test in extensions.interactions.base_test.
LINEAR_INTERACTION_IDS = ['Continue']
# Demo explorations to load through the admin panel. The id assigned to each
# exploration is based on the key of the exploration in this dict, so ensure it
# doesn't change once it's in the list. Only integer-based indices should be
# used in this list, as it maintains backward compatibility with how demo
# explorations used to be assigned IDs. The value of each entry in this dict is
# either a YAML file or a directory (depending on whether it ends in .yaml).
# These explorations can be found under data/explorations.
DEMO_EXPLORATIONS = {
u'0': 'welcome.yaml',
u'1': 'multiples.yaml',
u'2': 'binary_search',
u'3': 'root_linear_coefficient_theorem.yaml',
u'4': 'three_balls',
# TODO(bhenning): Replace demo exploration '5' with a new exploration
# described in #1376.
u'6': 'boot_verbs.yaml',
u'7': 'hola.yaml',
u'8': 'adventure.yaml',
u'9': 'pitch_perfect.yaml',
u'10': 'test_interactions',
u'11': 'modeling_graphs',
u'12': 'protractor_test_1.yaml',
u'13': 'solar_system',
u'14': 'about_oppia.yaml',
u'15': 'classifier_demo_exploration.yaml',
u'16': 'all_interactions',
u'17': 'audio_test',
# Exploration with ID 18 was used for testing CodeClassifier functionality
# which has been removed (#10060).
u'19': 'example_exploration_in_collection1.yaml',
u'20': 'example_exploration_in_collection2.yaml',
u'21': 'example_exploration_in_collection3.yaml',
u'22': 'protractor_mobile_test_exploration.yaml',
u'23': 'rating_test.yaml',
u'24': 'learner_flow_test.yaml',
u'25': 'exploration_player_test.yaml',
}
DEMO_COLLECTIONS = {
u'0': 'welcome_to_collections.yaml',
u'1': 'learner_flow_test_collection.yaml'
}
# IDs of explorations which should not be displayable in either the learner or
# editor views.
DISABLED_EXPLORATION_IDS = ['5']
# Oppia Google Group URL.
GOOGLE_GROUP_URL = (
'https://groups.google.com/forum/?place=forum/oppia#!forum/oppia')
# External URL for the Foundation site.
FOUNDATION_SITE_URL = 'http://oppiafoundation.org'
# Prefix for all taskqueue-related URLs.
TASKQUEUE_URL_PREFIX = '/task'
TASK_URL_FEEDBACK_MESSAGE_EMAILS = (
'%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FEEDBACK_STATUS_EMAILS = (
'%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FLAG_EXPLORATION_EMAILS = (
'%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_INSTANT_FEEDBACK_EMAILS = (
'%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_SUGGESTION_EMAILS = (
'%s/email/suggestionemailhandler' % TASKQUEUE_URL_PREFIX)
# TODO(sll): Add all other URLs here.
ADMIN_URL = '/admin'
ADMIN_ROLE_HANDLER_URL = '/adminrolehandler'
CLASSROOM_DATA_HANDLER = '/classroom_data_handler'
COLLECTION_DATA_URL_PREFIX = '/collection_handler/data'
COLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data'
COLLECTION_SUMMARIES_DATA_URL = '/collectionsummarieshandler/data'
COLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights'
COLLECTION_PUBLISH_PREFIX = '/collection_editor_handler/publish'
COLLECTION_UNPUBLISH_PREFIX = '/collection_editor_handler/unpublish'
COLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create'
COLLECTION_URL_PREFIX = '/collection'
CONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler'
CONTRIBUTOR_DASHBOARD_URL = '/contributor-dashboard'
CONTRIBUTOR_OPPORTUNITIES_DATA_URL = '/opportunitiessummaryhandler'
CREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data'
CREATOR_DASHBOARD_URL = '/creator-dashboard'
CSRF_HANDLER_URL = '/csrfhandler'
CUSTOM_NONPROFITS_LANDING_PAGE_URL = '/nonprofits'
CUSTOM_PARENTS_LANDING_PAGE_URL = '/parents'
CUSTOM_PARTNERS_LANDING_PAGE_URL = '/partners'
CUSTOM_TEACHERS_LANDING_PAGE_URL = '/teachers'
CUSTOM_VOLUNTEERS_LANDING_PAGE_URL = '/volunteers'
DASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL
EDITOR_URL_PREFIX = '/create'
EXPLORATION_DATA_PREFIX = '/createhandler/data'
EXPLORATION_FEATURES_PREFIX = '/explorehandler/features'
EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'
EXPLORATION_LEARNER_ANSWER_DETAILS = (
'/learneranswerinfohandler/learner_answer_details')
EXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search'
EXPLORATION_PRETESTS_URL_PREFIX = '/pretest_handler'
EXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'
EXPLORATION_STATE_ANSWER_STATS_PREFIX = '/createhandler/state_answer_stats'
EXPLORATION_STATUS_PREFIX = '/createhandler/status'
EXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data'
EXPLORATION_URL_PREFIX = '/explore'
EXPLORATION_URL_EMBED_PREFIX = '/embed/exploration'
FEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler'
FEEDBACK_THREAD_URL_PREFIX = '/threadhandler'
FEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'
FEEDBACK_THREADLIST_URL_PREFIX_FOR_TOPICS = '/threadlisthandlerfortopic'
FEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event'
FETCH_SKILLS_URL_PREFIX = '/fetch_skills'
FLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler'
FRACTIONS_LANDING_PAGE_URL = '/fractions'
IMPROVEMENTS_URL_PREFIX = '/improvements'
IMPROVEMENTS_HISTORY_URL_PREFIX = '/improvements/history'
IMPROVEMENTS_CONFIG_URL_PREFIX = '/improvements/config'
LEARNER_ANSWER_INFO_HANDLER_URL = (
'/learneranswerinfohandler/learner_answer_details')
LEARNER_ANSWER_DETAILS_SUBMIT_URL = '/learneranswerdetailshandler'
LEARNER_DASHBOARD_URL = '/learner-dashboard'
LEARNER_DASHBOARD_DATA_URL = '/learnerdashboardhandler/data'
LEARNER_DASHBOARD_IDS_DATA_URL = '/learnerdashboardidshandler/data'
LEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL = '/learnerdashboardthreadhandler'
LEARNER_PLAYLIST_DATA_URL = '/learnerplaylistactivityhandler'
LEARNER_INCOMPLETE_ACTIVITY_DATA_URL = '/learnerincompleteactivityhandler'
LIBRARY_GROUP_DATA_URL = '/librarygrouphandler'
LIBRARY_INDEX_URL = '/community-library'
LIBRARY_INDEX_DATA_URL = '/libraryindexhandler'
LIBRARY_RECENTLY_PUBLISHED_URL = '/community-library/recently-published'
LIBRARY_SEARCH_URL = '/search/find'
LIBRARY_SEARCH_DATA_URL = '/searchhandler/data'
LIBRARY_TOP_RATED_URL = '/community-library/top-rated'
MERGE_SKILLS_URL = '/merge_skills_handler'
NEW_COLLECTION_URL = '/collection_editor_handler/create_new'
NEW_EXPLORATION_URL = '/contributehandler/create_new'
NEW_QUESTION_URL = '/question_editor_handler/create_new'
NEW_SKILL_URL = '/skill_editor_handler/create_new'
TOPIC_EDITOR_STORY_URL = '/topic_editor_story_handler'
TOPIC_EDITOR_QUESTION_URL = '/topic_editor_question_handler'
NEW_TOPIC_URL = '/topic_editor_handler/create_new'
NOTIFICATIONS_DASHBOARD_URL = '/notifications'
PREFERENCES_URL = '/preferences'
PRACTICE_SESSION_URL_PREFIX = '/practice_session'
PRACTICE_SESSION_DATA_URL_PREFIX = '/practice_session/data'
PREFERENCES_DATA_URL = '/preferenceshandler/data'
QUESTION_EDITOR_DATA_URL_PREFIX = '/question_editor_handler/data'
QUESTION_SKILL_LINK_URL_PREFIX = '/manage_question_skill_link'
QUESTIONS_LIST_URL_PREFIX = '/questions_list_handler'
QUESTION_COUNT_URL_PREFIX = '/question_count_handler'
QUESTIONS_URL_PREFIX = '/question_player_handler'
RECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'
RECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'
DELETE_ACCOUNT_URL = '/delete-account'
DELETE_ACCOUNT_HANDLER_URL = '/delete-account-handler'
EXPORT_ACCOUNT_HANDLER_URL = '/export-account-handler'
PENDING_ACCOUNT_DELETION_URL = '/pending-account-deletion'
REVIEW_TEST_DATA_URL_PREFIX = '/review_test_handler/data'
REVIEW_TEST_URL_PREFIX = '/review_test'
ROBOTS_TXT_URL = '/robots.txt'
SITE_LANGUAGE_DATA_URL = '/save_site_language'
SIGNUP_DATA_URL = '/signuphandler/data'
SIGNUP_URL = '/signup'
SKILL_DASHBOARD_DATA_URL = '/skills_dashboard/data'
SKILL_DATA_URL_PREFIX = '/skill_data_handler'
SKILL_EDITOR_DATA_URL_PREFIX = '/skill_editor_handler/data'
SKILL_EDITOR_URL_PREFIX = '/skill_editor'
SKILL_EDITOR_QUESTION_URL = '/skill_editor_question_handler'
SKILL_MASTERY_DATA_URL = '/skill_mastery_handler/data'
SKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights'
STORY_DATA_HANDLER = '/story_data_handler'
STORY_EDITOR_URL_PREFIX = '/story_editor'
STORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data'
STORY_PROGRESS_URL_PREFIX = '/story_progress_handler'
STORY_PUBLISH_HANDLER = '/story_publish_handler'
STORY_URL_FRAGMENT_HANDLER = '/story_url_fragment_handler'
STORY_VIEWER_URL_PREFIX = '/story'
SUBTOPIC_DATA_HANDLER = '/subtopic_data_handler'
SUBTOPIC_VIEWER_URL_PREFIX = '/subtopic'
SUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler'
SUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler'
SUGGESTION_URL_PREFIX = '/suggestionhandler'
SUBSCRIBE_URL_PREFIX = '/subscribehandler'
SUBTOPIC_PAGE_EDITOR_DATA_URL_PREFIX = '/subtopic_page_editor_handler/data'
TOPIC_VIEWER_URL_PREFIX = (
'/learn/<classroom_url_fragment>/<topic_url_fragment>')
TOPIC_DATA_HANDLER = '/topic_data_handler'
TOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data'
TOPIC_EDITOR_URL_PREFIX = '/topic_editor'
TOPIC_NAME_HANDLER = '/topic_name_handler'
TOPIC_RIGHTS_URL_PREFIX = '/rightshandler/get_topic_rights'
TOPIC_SEND_MAIL_URL_PREFIX = '/rightshandler/send_topic_publish_mail'
TOPIC_STATUS_URL_PREFIX = '/rightshandler/change_topic_status'
TOPIC_URL_FRAGMENT_HANDLER = '/topic_url_fragment_handler'
TOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data'
UNASSIGN_SKILL_DATA_HANDLER_URL = '/topics_and_skills_dashboard/unassign_skill'
TOPICS_AND_SKILLS_DASHBOARD_URL = '/topics-and-skills-dashboard'
UNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler'
UPLOAD_EXPLORATION_URL = '/contributehandler/upload'
USER_EXPLORATION_EMAILS_PREFIX = '/createhandler/notificationpreferences'
USER_PERMISSIONS_URL_PREFIX = '/createhandler/permissions'
USERNAME_CHECK_DATA_URL = '/usernamehandler/data'
VALIDATE_STORY_EXPLORATIONS_URL_PREFIX = '/validate_story_explorations'
# Event types.
EVENT_TYPE_ALL_STATS = 'all_stats'
EVENT_TYPE_STATE_HIT = 'state_hit'
EVENT_TYPE_STATE_COMPLETED = 'state_complete'
EVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'
EVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'
EVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'
EVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'
EVENT_TYPE_RATE_EXPLORATION = 'rate_exploration'
EVENT_TYPE_SOLUTION_HIT = 'solution_hit'
EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP = 'leave_for_refresher_exp'
# The values for these event types should be left as-is for backwards
# compatibility.
EVENT_TYPE_START_EXPLORATION = 'start'
EVENT_TYPE_ACTUAL_START_EXPLORATION = 'actual_start'
EVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'
EVENT_TYPE_COMPLETE_EXPLORATION = 'complete'
# Play type constants.
PLAY_TYPE_PLAYTEST = 'playtest'
PLAY_TYPE_NORMAL = 'normal'
# Predefined commit messages.
COMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'
COMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.'
COMMIT_MESSAGE_QUESTION_DELETED = 'Question deleted.'
COMMIT_MESSAGE_SKILL_DELETED = 'Skill deleted.'
COMMIT_MESSAGE_STORY_DELETED = 'Story deleted.'
COMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED = 'Subtopic page deleted.'
COMMIT_MESSAGE_TOPIC_DELETED = 'Topic deleted.'
# Max number of playthroughs for an issue.
MAX_PLAYTHROUGHS_FOR_ISSUE = 5
# Number of unresolved answers to be displayed in the dashboard for each
# exploration.
TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3
# Number of open feedback to be displayed in the dashboard for each exploration.
OPEN_FEEDBACK_COUNT_DASHBOARD = 3
# NOTE TO DEVELOPERS: This should be synchronized with app.constants.ts.
ENABLE_ML_CLASSIFIERS = False
# The regular expression used to identify whether a string contains a float
# value. The regex must match the regex stored in the vmconf.py file of
# Oppia-ml. If this regex needs to be modified, first shut down the Oppia-ml
# VM, then update the regex constant both here and in Oppia-ml, and run any
# migration job that is required to migrate existing trained models before
# starting Oppia-ml again.
FLOAT_VERIFIER_REGEX = (
'^([-+]?\\d*\\.\\d+)$|^([-+]?(\\d*\\.?\\d+|\\d+\\.?\\d*)e[-+]?\\d*)$')
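# For illustration only: how the regex above is expected to behave with the
# standard `re` module. The helper name below is hypothetical.
#
#     import re
#     float_verifier = re.compile(FLOAT_VERIFIER_REGEX)
#     float_verifier.match('-3.14')      # matches: plain decimal
#     float_verifier.match('+1.5e-3')    # matches: scientific notation
#     float_verifier.match('42')         # None: bare integers do not match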
# Current event models schema version. All event models with an
# event_schema_version of 1 are the events collected before the rework of the
# statistics framework which brought about the recording of new event models;
# these models include all models recorded before Feb 2018.
CURRENT_EVENT_MODELS_SCHEMA_VERSION = 2
# Output formats of downloaded explorations.
OUTPUT_FORMAT_JSON = 'json'
OUTPUT_FORMAT_ZIP = 'zip'
# Types of updates shown in the 'recent updates' table in the dashboard page.
UPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'
UPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit'
UPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'
# Possible values for user query status.
# Valid status transitions are: processing --> completed --> archived
# or processing --> failed.
USER_QUERY_STATUS_PROCESSING = 'processing'
USER_QUERY_STATUS_COMPLETED = 'completed'
USER_QUERY_STATUS_ARCHIVED = 'archived'
USER_QUERY_STATUS_FAILED = 'failed'
# The maximum time difference between two login events for them to be
# considered "close". This is taken to be 12 hours.
PROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60
# The i18n id for the header of the "Featured Activities" category in the
# library index page.
LIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES'
# The i18n id for the header of the "Top Rated Explorations" category in the
# library index page.
LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = (
'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS')
# The i18n id for the header of the "Recently Published" category in the
# library index page.
LIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED'
# The group name that appears at the end of the url for the recently published
# page.
LIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently-published'
# The group name that appears at the end of the url for the top rated page.
LIBRARY_GROUP_TOP_RATED = 'top-rated'
# Defaults for topic similarities.
DEFAULT_TOPIC_SIMILARITY = 0.5
SAME_TOPIC_SIMILARITY = 1.0
# The type of the response returned by a handler when an exception is raised.
HANDLER_TYPE_HTML = 'html'
HANDLER_TYPE_JSON = 'json'
HANDLER_TYPE_DOWNLOADABLE = 'downloadable'
# Following are the constants for the role IDs.
ROLE_ID_GUEST = 'GUEST'
ROLE_ID_BANNED_USER = 'BANNED_USER'
ROLE_ID_LEARNER = 'LEARNER'
ROLE_ID_EXPLORATION_EDITOR = 'EXPLORATION_EDITOR'
ROLE_ID_COLLECTION_EDITOR = 'COLLECTION_EDITOR'
ROLE_ID_TOPIC_MANAGER = 'TOPIC_MANAGER'
ROLE_ID_MODERATOR = 'MODERATOR'
ROLE_ID_ADMIN = 'ADMIN'
# Intent of the User making query to role structure via admin interface. Used
# to store audit data regarding queries to role IDs.
ROLE_ACTION_UPDATE = 'update'
ROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username'
ROLE_ACTION_VIEW_BY_ROLE = 'view_by_role'
USER_FILTER_CRITERION_ROLE = 'role'
USER_FILTER_CRITERION_USERNAME = 'username'
QUESTION_BATCH_SIZE = 10
STATE_ANSWER_STATS_MIN_FREQUENCY = 2
RTE_FORMAT_TEXTANGULAR = 'text-angular'
RTE_FORMAT_CKEDITOR = 'ck-editor'
# RTE content specifications according to the type of the editor.
RTE_CONTENT_SPEC = {
'RTE_TYPE_TEXTANGULAR': {
# Valid parent-child relation in TextAngular.
'ALLOWED_PARENT_LIST': {
'p': ['blockquote', 'div', 'pre', '[document]', 'ol', 'ul', 'li'],
'b': ['i', 'li', 'p', 'pre'],
'br': ['b', 'i', 'li', 'p'],
'i': ['b', 'li', 'p', 'pre'],
'li': ['ol', 'ul'],
'ol': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],
'ul': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],
'pre': ['ol', 'ul', 'blockquote', '[document]'],
'blockquote': ['blockquote', '[document]'],
'oppia-noninteractive-link': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-math': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-image': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-collapsible': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-video': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-tabs': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-svgdiagram': ['b', 'i', 'li', 'p', 'pre']
},
# Valid html tags in TextAngular.
'ALLOWED_TAG_LIST': [
'p',
'b',
'br',
'i',
'li',
'ol',
'ul',
'pre',
'blockquote',
'oppia-noninteractive-link',
'oppia-noninteractive-math',
'oppia-noninteractive-image',
'oppia-noninteractive-collapsible',
'oppia-noninteractive-video',
'oppia-noninteractive-tabs',
'oppia-noninteractive-svgdiagram'
]
},
'RTE_TYPE_CKEDITOR': {
# Valid parent-child relation in CKEditor.
'ALLOWED_PARENT_LIST': {
'p': ['blockquote', '[document]', 'li'],
'strong': ['em', 'li', 'p', 'pre'],
'em': ['strong', 'li', 'p', 'pre'],
'br': ['strong', 'em', 'li', 'p'],
'li': ['ol', 'ul'],
'ol': ['li', 'blockquote', 'pre', '[document]'],
'ul': ['li', 'blockquote', 'pre', '[document]'],
'pre': ['ol', 'ul', 'blockquote', 'li', '[document]'],
'blockquote': ['blockquote', '[document]'],
'oppia-noninteractive-link': ['strong', 'em', 'li', 'p', 'pre'],
'oppia-noninteractive-math': ['strong', 'em', 'li', 'p', 'pre'],
'oppia-noninteractive-image': ['blockquote', 'li', '[document]'],
'oppia-noninteractive-svgdiagram': [
'blockquote', 'li', '[document]'
],
'oppia-noninteractive-collapsible': [
'blockquote', 'li', '[document]'
],
'oppia-noninteractive-video': ['blockquote', 'li', '[document]'],
'oppia-noninteractive-tabs': ['blockquote', 'li', '[document]']
},
# Valid html tags in CKEditor.
'ALLOWED_TAG_LIST': [
'p',
'strong',
'br',
'em',
'li',
'ol',
'ul',
'pre',
'blockquote',
'oppia-noninteractive-link',
'oppia-noninteractive-math',
'oppia-noninteractive-image',
'oppia-noninteractive-collapsible',
'oppia-noninteractive-video',
'oppia-noninteractive-tabs',
'oppia-noninteractive-svgdiagram'
]
}
}
# A dict representing available landing pages, having subject as a key and list
# of topics as the value.
# Note: This dict needs to be kept in sync with frontend TOPIC_LANDING_PAGE_DATA
# oppia constant defined in
# core/templates/pages/landing-pages/TopicLandingPage.js file.
AVAILABLE_LANDING_PAGES = {
'math': ['fractions', 'negative-numbers', 'ratios']
}
# Classroom page names for generating URLs. These need to be kept in sync with
# CLASSROOM_PAGES_DATA property in config_domain.
CLASSROOM_PAGES = ['math']
# Authentication method using GAE ID (google sign in).
AUTH_METHOD_GAE = 'gae'
# TODO(#10501): Once domain objects can be imported by the storage layer, move
# these back to appropriate places (rights_domain, topic_domain).
# The reserved prefix for keys that are automatically inserted into a
# commit_cmd dict by this model.
AUTOGENERATED_PREFIX = 'AUTO'
# The command string for a revert commit.
CMD_REVERT_COMMIT = '%s_revert_version_number' % AUTOGENERATED_PREFIX
# The command string for a delete commit.
CMD_DELETE_COMMIT = '%s_mark_deleted' % AUTOGENERATED_PREFIX
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status'
CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status'
CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability'
CMD_RELEASE_OWNERSHIP = 'release_ownership'
CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec'
# Roles used in collections and explorations.
ROLE_OWNER = 'owner'
ROLE_EDITOR = 'editor'
ROLE_VOICE_ARTIST = 'voice artist'
ROLE_VIEWER = 'viewer'
ROLE_NONE = 'none'
# The allowed list of roles which can be used in change_role command.
ALLOWED_ACTIVITY_ROLES = [
ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST, ROLE_VIEWER]
# The allowed list of statuses which can be used in the
# change_exploration_status and change_collection_status commands.
ALLOWED_ACTIVITY_STATUS = [
constants.ACTIVITY_STATUS_PRIVATE, constants.ACTIVITY_STATUS_PUBLIC]
# Commands allowed in CollectionRightsChange and ExplorationRightsChange.
COMMON_RIGHTS_ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'old_role', 'new_role'],
'optional_attribute_names': [],
'user_id_attribute_names': ['assignee_id'],
'allowed_values': {
'new_role': ALLOWED_ACTIVITY_ROLES, 'old_role': ALLOWED_ACTIVITY_ROLES}
}, {
'name': CMD_CHANGE_PRIVATE_VIEWABILITY,
'required_attribute_names': [
'old_viewable_if_private', 'new_viewable_if_private'],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_RELEASE_OWNERSHIP,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'required_attribute_names': [
'old_first_published_msec', 'new_first_published_msec'],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_DELETE_COMMIT,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}]
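# For illustration only: a change dict that would satisfy the CMD_CHANGE_ROLE
# spec above. The 'cmd' key name and the concrete user id are assumptions made
# purely for this sketch.
#
#     {
#         'cmd': CMD_CHANGE_ROLE,
#         'assignee_id': 'uid_abc123',
#         'old_role': ROLE_VIEWER,
#         'new_role': ROLE_EDITOR,
#     }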
COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS = copy.deepcopy(
COMMON_RIGHTS_ALLOWED_COMMANDS)
COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_COLLECTION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'user_id_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_ACTIVITY_STATUS,
'new_status': ALLOWED_ACTIVITY_STATUS
}
})
EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS = copy.deepcopy(
COMMON_RIGHTS_ALLOWED_COMMANDS)
EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_EXPLORATION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'user_id_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_ACTIVITY_STATUS,
'new_status': ALLOWED_ACTIVITY_STATUS
}
})
CMD_REMOVE_MANAGER_ROLE = 'remove_manager_role'
CMD_PUBLISH_TOPIC = 'publish_topic'
CMD_UNPUBLISH_TOPIC = 'unpublish_topic'
ROLE_MANAGER = 'manager'
# The allowed list of roles which can be used in TopicRightsChange change_role
# command.
ALLOWED_TOPIC_ROLES = [ROLE_NONE, ROLE_MANAGER]
# Commands allowed in TopicRightsChange.
TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'new_role', 'old_role'],
'optional_attribute_names': [],
'user_id_attribute_names': ['assignee_id'],
'allowed_values': {
'new_role': ALLOWED_TOPIC_ROLES, 'old_role': ALLOWED_TOPIC_ROLES
}
}, {
'name': CMD_REMOVE_MANAGER_ROLE,
'required_attribute_names': ['removed_user_id'],
'optional_attribute_names': [],
'user_id_attribute_names': ['removed_user_id']
}, {
'name': CMD_PUBLISH_TOPIC,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_UNPUBLISH_TOPIC,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_DELETE_COMMIT,
'required_attribute_names': [],
'optional_attribute_names': [],
'user_id_attribute_names': []
}]
# Length of user PIN for different roles used on Android.
FULL_USER_PIN_LENGTH = 5
PROFILE_USER_PIN_LENGTH = 3
| 40.171334 | 80 | 0.76866 |
577a98751ff395d91326ee4fd33c5f2f02ec3442 | 8,056 | py | Python |
examples/gcloud-example/dags/bigquery_github_trends.py | GabeCodev/airflow-tutorial | 0fe6bfcc9687456a2cabd94b102e348df768c38c | ["MIT"] | null | null | null |
examples/gcloud-example/dags/bigquery_github_trends.py | GabeCodev/airflow-tutorial | 0fe6bfcc9687456a2cabd94b102e348df768c38c | ["MIT"] | null | null | null |
examples/gcloud-example/dags/bigquery_github_trends.py | GabeCodev/airflow-tutorial | 0fe6bfcc9687456a2cabd94b102e348df768c38c | ["MIT"] | null | null | null |
import json
from datetime import timedelta, datetime
from airflow import DAG
from airflow.models import Variable
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
# Config variables (recommended way of using variables in Airflow).
dag_config = Variable.get("bigquery_github_trends_variables", deserialize_json=True)
BQ_CONN_ID = dag_config["bq_conn_id"]
BQ_PROJECT = dag_config["bq_project"]
BQ_DATASET = dag_config["bq_dataset"]
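# The Airflow Variable above is expected to hold JSON along these lines
# (an illustrative sketch; substitute your own connection, project and dataset):
#
#     {
#         "bq_conn_id": "my_gcp_connection",
#         "bq_project": "my-gcp-project",
#         "bq_dataset": "github_trends"
#     }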
default_args = {
'owner': 'airflow',
'depends_on_past': True,
'start_date': datetime(2020, 8, 1),
'end_date': datetime(2020, 8, 13),
'email': ['[email protected]'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=5),
}
# Set schedule: run the pipeline once a day.
# Use cron to define the exact time, e.g. 8:15am would be "15 08 * * *".
schedule_interval = "00 21 * * *"
# Define DAG: Set ID and assign default args and schedule interval
dag = DAG(
'bigquery_github_trends',
default_args=default_args,
schedule_interval=schedule_interval
)
## Task 1: check that the github archive data has a dated table created for that date
# To test this task, run this command:
# docker-compose -f docker-compose-gcloud.yml run --rm webserver airflow test bigquery_github_trends bq_check_githubarchive_day 2020-08-10
t1 = BigQueryCheckOperator(
task_id='bq_check_githubarchive_day',
sql='''
#standardSQL
SELECT
table_id
FROM
`githubarchive.day.__TABLES_SUMMARY__`
WHERE
table_id = "{{ yesterday_ds_nodash }}"
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 2: check that the hacker news table contains data for that date.
t2 = BigQueryCheckOperator(
task_id='bq_check_hackernews_full',
sql='''
#standardSQL
SELECT
timestamp,
FORMAT_TIMESTAMP("%Y%m%d", timestamp ) AS date
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND FORMAT_TIMESTAMP("%Y%m%d", timestamp ) = "{{ yesterday_ds_nodash }}"
LIMIT
1
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 3: create a github daily metrics partition table
t3 = BigQueryOperator(
task_id='bq_write_to_github_daily_metrics',
sql='''
#standardSQL
SELECT
date,
repo,
SUM(IF(type='WatchEvent', 1, NULL)) AS stars,
SUM(IF(type='ForkEvent', 1, NULL)) AS forks
FROM (
SELECT
FORMAT_TIMESTAMP("%Y%m%d", created_at) AS date,
actor.id as actor_id,
repo.name as repo,
type
FROM
`githubarchive.day.{{ yesterday_ds_nodash }}`
WHERE type IN ('WatchEvent','ForkEvent')
)
GROUP BY
date,
repo
''',
destination_dataset_table='{0}.{1}.github_daily_metrics${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
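# Note: the '$YYYYMMDD' suffix on destination_dataset_table is BigQuery's
# partition decorator. Combined with WRITE_TRUNCATE it replaces only that
# day's partition, which keeps re-runs of this task idempotent.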
## Task 4: aggregate past github events to daily partition table
t4 = BigQueryOperator(
task_id='bq_write_to_github_agg',
sql='''
#standardSQL
SELECT
"{2}" as date,
repo,
SUM(stars) as stars_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_1_day,
SUM(forks) as forks_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_1_day
FROM
`{0}.{1}.github_daily_metrics`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{5}")
AND TIMESTAMP("{3}")
GROUP BY
date,
repo
'''.format(BQ_PROJECT, BQ_DATASET,
"{{ yesterday_ds_nodash }}", "{{ yesterday_ds }}",
"{{ macros.ds_add(ds, -6) }}",
"{{ macros.ds_add(ds, -27) }}"
)
,
destination_dataset_table='{0}.{1}.github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 5: aggregate hacker news data to a daily partition table
t5 = BigQueryOperator(
task_id='bq_write_to_hackernews_agg',
sql='''
#standardSQL
SELECT
FORMAT_TIMESTAMP("%Y%m%d", timestamp) AS date,
`by` AS submitter,
id as story_id,
REGEXP_EXTRACT(url, "(https?://github.com/[^/]*/[^/#?]*)") as url,
SUM(score) as score
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND timestamp>'{{ yesterday_ds }}'
AND timestamp<'{{ ds }}'
AND url LIKE '%https://github.com%'
AND url NOT LIKE '%github.com/blog/%'
GROUP BY
date,
submitter,
story_id,
url
''',
destination_dataset_table='{0}.{1}.hackernews_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 6: join the aggregate tables
t6 = BigQueryOperator(
task_id='bq_write_to_hackernews_github_agg',
sql='''
#standardSQL
SELECT
a.date as date,
a.url as github_url,
b.repo as github_repo,
a.score as hn_score,
a.story_id as hn_story_id,
b.stars_last_28_days as stars_last_28_days,
b.stars_last_7_days as stars_last_7_days,
b.stars_last_1_day as stars_last_1_day,
b.forks_last_28_days as forks_last_28_days,
b.forks_last_7_days as forks_last_7_days,
b.forks_last_1_day as forks_last_1_day
FROM
(SELECT
*
FROM
`{0}.{1}.hackernews_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
)as a
LEFT JOIN
(
SELECT
repo,
CONCAT('https://github.com/', repo) as url,
stars_last_28_days,
stars_last_7_days,
stars_last_1_day,
forks_last_28_days,
forks_last_7_days,
forks_last_1_day
FROM
`{0}.{1}.github_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
) as b
ON a.url = b.url
'''.format(
BQ_PROJECT, BQ_DATASET, "{{ yesterday_ds }}"
),
destination_dataset_table='{0}.{1}.hackernews_github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 7: Check if partition data is written successfully
t7 = BigQueryCheckOperator(
task_id='bq_check_hackernews_github_agg',
sql='''
#standardSQL
SELECT
COUNT(*) AS rows_in_partition
FROM `{0}.{1}.hackernews_github_agg`
WHERE _PARTITIONDATE = "{2}"
'''.format(BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds }}'
),
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag)
# Setting up Dependencies
t3.set_upstream(t1)
t4.set_upstream(t3)
t5.set_upstream(t2)
t6.set_upstream(t4)
t6.set_upstream(t5)
t7.set_upstream(t6)
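# Equivalent, more idiomatic wiring with Airflow's bitshift operators
# (a sketch; functionally the same as the set_upstream() calls above):
#
#     t1 >> t3 >> t4
#     t2 >> t5
#     [t4, t5] >> t6 >> t7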
| 30.059701 | 138 | 0.618545 |
0f102e65d299101d02562cde9d5e76fd415f8332 | 7,812 | py | Python |
polls/views.py | seunghoon4176/Wezihack | 3f4c00294e1937aa6b65eb24f5c4318defd69760 | ["MIT"] | null | null | null |
polls/views.py | seunghoon4176/Wezihack | 3f4c00294e1937aa6b65eb24f5c4318defd69760 | ["MIT"] | null | null | null |
polls/views.py | seunghoon4176/Wezihack | 3f4c00294e1937aa6b65eb24f5c4318defd69760 | ["MIT"] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.db.models import Count
from django.contrib import messages
from .models import Poll, Choice, Vote
from .forms import PollAddForm, EditPollForm, ChoiceAddForm
from django.http import HttpResponse
@login_required()
def polls_list(request):
all_polls = Poll.objects.all()
search_term = ''
if 'name' in request.GET:
all_polls = all_polls.order_by('text')
if 'date' in request.GET:
all_polls = all_polls.order_by('pub_date')
if 'vote' in request.GET:
all_polls = all_polls.annotate(Count('vote')).order_by('vote__count')
if 'search' in request.GET:
search_term = request.GET['search']
all_polls = all_polls.filter(text__icontains=search_term)
paginator = Paginator(all_polls, 6) # Show 6 polls per page
page = request.GET.get('page')
polls = paginator.get_page(page)
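# Preserve the current filter/search querystring (minus 'page') so that the
# pagination links can re-append it; QueryDict.pop() returns a truthy value
# here, so 'params' ends up as the urlencoded remainder of the GET parameters.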
get_dict_copy = request.GET.copy()
params = get_dict_copy.pop('page', True) and get_dict_copy.urlencode()
print(params)
context = {
'polls': polls,
'params': params,
'search_term': search_term,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def list_by_user(request):
all_polls = Poll.objects.filter(owner=request.user)
paginator = Paginator(all_polls, 7) # Show 7 polls per page
page = request.GET.get('page')
polls = paginator.get_page(page)
context = {
'polls': polls,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def polls_add(request):
if request.user.has_perm('polls.add_poll'):
if request.method == 'POST':
form = PollAddForm(request.POST)
if form.is_valid():
poll = form.save(commit=False)
poll.owner = request.user
poll.save()
new_choice1 = Choice(
poll=poll, choice_text=form.cleaned_data['choice1']).save()
new_choice2 = Choice(
poll=poll, choice_text=form.cleaned_data['choice2']).save()
messages.success(
request, "Poll & Choices added successfully.", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:list')
else:
form = PollAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_poll.html', context)
else:
return HttpResponse("Sorry but you don't have permission to do that!")
@login_required
def polls_edit(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = EditPollForm(request.POST, instance=poll)
if form.is_valid():
form.save()
messages.success(request, "Poll Updated successfully.",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
else:
form = EditPollForm(instance=poll)
return render(request, "polls/poll_edit.html", {'form': form, 'poll': poll})
@login_required
def polls_delete(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
poll.delete()
messages.success(request, "Poll Deleted successfully.",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
@login_required
def add_choice(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST)
if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice added successfully.", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_edit(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST, instance=choice)
if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice Updated successfully.", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm(instance=choice)
context = {
'form': form,
'edit_choice': True,
'choice': choice,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_delete(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
choice.delete()
messages.success(
request, "Choice Deleted successfully.", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
def poll_detail(request, poll_id):
poll = get_object_or_404(Poll, id=poll_id)
if not poll.active:
return render(request, 'polls/poll_result.html', {'poll': poll})
loop_count = poll.choice_set.count()
context = {
'poll': poll,
'loop_time': range(0, loop_count),
}
return render(request, 'polls/poll_detail.html', context)
@login_required
def poll_vote(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
choice_id = request.POST.get('choice')
if not poll.user_can_vote(request.user):
messages.error(
request, "You already voted this poll!", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:list")
if choice_id:
choice = Choice.objects.get(id=choice_id)
vote = Vote(user=request.user, poll=poll, choice=choice)
vote.save()
print(vote)
return render(request, 'polls/poll_result.html', {'poll': poll})
else:
messages.error(
request, "No choice selected!", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:detail", poll_id)
return render(request, 'polls/poll_result.html', {'poll': poll})
@login_required
def endpoll(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if poll.active is True:
poll.active = False
poll.save()
return render(request, 'polls/poll_result.html', {'poll': poll})
else:
return render(request, 'polls/poll_result.html', {'poll': poll})
@login_required()
def ilzeung(request):
return render(request, 'polls/ilzeung.html')
@login_required()
def map(request):
return render(request, 'polls/map.html')
@login_required()
def classes(request):
return render(request, 'polls/classes.html')
@login_required()
def food(request):
return render(request, 'polls/food.html')
@login_required()
def tong(request):
return render(request, 'polls/tong.html')
| 31.756098 | 128 | 0.646569 |
eee89f002eb01ca7ef40656401fbe46ea23447fe | 8,590 | py | Python |
beginner_source/blitz/neural_networks_tutorial.py | kemingzeng/tutorials | e55b6e2f4f99c3d504447f0c151b1f99d2707981 | ["BSD-3-Clause"] | 1 | 2018-05-11T02:59:49.000Z | 2018-05-11T02:59:49.000Z |
beginner_source/blitz/neural_networks_tutorial.py | kemingzeng/tutorials | e55b6e2f4f99c3d504447f0c151b1f99d2707981 | ["BSD-3-Clause"] | null | null | null |
beginner_source/blitz/neural_networks_tutorial.py | kemingzeng/tutorials | e55b6e2f4f99c3d504447f0c151b1f99d2707981 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Neural Networks
===============
Neural networks can be constructed using the ``torch.nn`` package.
Now that you had a glimpse of ``autograd``, ``nn`` depends on
``autograd`` to define models and differentiate them.
An ``nn.Module`` contains layers, and a method ``forward(input)``\ that
returns the ``output``.
For example, look at this network that classifies digit images:
.. figure:: /_static/img/mnist.png
:alt: convnet
convnet
It is a simple feed-forward network. It takes the input, feeds it
through several layers one after the other, and then finally gives the
output.
A typical training procedure for a neural network is as follows:
- Define the neural network that has some learnable parameters (or
weights)
- Iterate over a dataset of inputs
- Process input through the network
- Compute the loss (how far is the output from being correct)
- Propagate gradients back into the network’s parameters
- Update the weights of the network, typically using a simple update rule:
``weight = weight - learning_rate * gradient``
Define the network
------------------
Let’s define this network:
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution
# kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
########################################################################
# You just have to define the ``forward`` function, and the ``backward``
# function (where gradients are computed) is automatically defined for you
# using ``autograd``.
# You can use any of the Tensor operations in the ``forward`` function.
#
# The learnable parameters of a model are returned by ``net.parameters()``
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
########################################################################
# Let's try a random 32x32 input.
# Note: the expected input size for this net (LeNet) is 32x32. To use this net
# on the MNIST dataset, please resize the images from the dataset to 32x32.
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
########################################################################
# Zero the gradient buffers of all parameters and backprops with random
# gradients:
net.zero_grad()
out.backward(torch.randn(1, 10))
########################################################################
# .. note::
#
# ``torch.nn`` only supports mini-batches. The entire ``torch.nn``
# package only supports inputs that are a mini-batch of samples, and not
# a single sample.
#
# For example, ``nn.Conv2d`` will take in a 4D Tensor of
# ``nSamples x nChannels x Height x Width``.
#
# If you have a single sample, just use ``input.unsqueeze(0)`` to add
# a fake batch dimension.
#
# Before proceeding further, let's recap all the classes you’ve seen so far.
#
# **Recap:**
# - ``torch.Tensor`` - A *multi-dimensional array* with support for autograd
# operations like ``backward()``. Also *holds the gradient* w.r.t. the
# tensor.
# - ``nn.Module`` - Neural network module. *Convenient way of
# encapsulating parameters*, with helpers for moving them to GPU,
# exporting, loading, etc.
# - ``nn.Parameter`` - A kind of Tensor, that is *automatically
# registered as a parameter when assigned as an attribute to a*
# ``Module``.
# - ``autograd.Function`` - Implements *forward and backward definitions
# of an autograd operation*. Every ``Tensor`` operation, creates at
# least a single ``Function`` node, that connects to functions that
# created a ``Tensor`` and *encodes its history*.
#
# **At this point, we covered:**
# - Defining a neural network
# - Processing inputs and calling backward
#
# **Still Left:**
# - Computing the loss
# - Updating the weights of the network
#
# Loss Function
# -------------
# A loss function takes the (output, target) pair of inputs, and computes a
# value that estimates how far away the output is from the target.
#
# There are several different
# `loss functions <http://pytorch.org/docs/nn.html#loss-functions>`_ under the
# nn package .
# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error
# between the input and the target.
#
# For example:
output = net(input)
target = torch.arange(1, 11, dtype=torch.float) # a dummy target, for example
target = target.view(1, -1) # make it the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
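########################################################################
# A quick aside on the earlier note about mini-batches: starting from a single
# sample of shape ``nChannels x Height x Width``, ``unsqueeze(0)`` adds the
# fake batch dimension that the ``nn`` package expects.

single_image = torch.randn(1, 32, 32)          # C x H x W, no batch dimension
print(net(single_image.unsqueeze(0)).size())   # torch.Size([1, 10])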
########################################################################
# Now, if you follow ``loss`` in the backward direction, using its
# ``.grad_fn`` attribute, you will see a graph of computations that looks
# like this:
#
# ::
#
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
# -> view -> linear -> relu -> linear -> relu -> linear
# -> MSELoss
# -> loss
#
# So, when we call ``loss.backward()``, the whole graph is differentiated
# w.r.t. the loss, and all Tensors in the graph that have ``requires_grad=True``
# will have their ``.grad`` Tensor accumulated with the gradient.
#
# For illustration, let us follow a few steps backward:
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
########################################################################
# Backprop
# --------
# To backpropagate the error, all we have to do is call ``loss.backward()``.
# You need to clear the existing gradients though, else gradients will be
# accumulated to existing gradients.
#
#
# Now we shall call ``loss.backward()``, and have a look at conv1's bias
# gradients before and after the backward.
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
########################################################################
# Now, we have seen how to use loss functions.
#
# **Read Later:**
#
# The neural network package contains various modules and loss functions
# that form the building blocks of deep neural networks. A full list with
# documentation is `here <http://pytorch.org/docs/nn>`_.
#
# **The only thing left to learn is:**
#
# - Updating the weights of the network
#
# Update the weights
# ------------------
# The simplest update rule used in practice is the Stochastic Gradient
# Descent (SGD):
#
# ``weight = weight - learning_rate * gradient``
#
# We can implement this using simple python code:
#
# .. code:: python
#
# learning_rate = 0.01
# for f in net.parameters():
# f.data.sub_(f.grad.data * learning_rate)
#
# However, as you use neural networks, you want to use various different
# update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
# To enable this, we built a small package: ``torch.optim`` that
# implements all these methods. Using it is very simple:
import torch.optim as optim
# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step() # Does the update
###############################################################
# .. Note::
#
# Observe how gradient buffers had to be manually set to zero using
# ``optimizer.zero_grad()``. This is because gradients are accumulated
# as explained in `Backprop`_ section.
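###############################################################
# A tiny demonstration of that accumulation behaviour, reusing the tensors
# defined above: calling ``backward()`` twice without zeroing in between
# simply adds the two gradients together.

net.zero_grad()
criterion(net(input), target).backward()
first_grad = net.conv1.bias.grad.clone()
criterion(net(input), target).backward()     # no zero_grad() in between
print(net.conv1.bias.grad - 2 * first_grad)  # a tensor of zeros: the gradients were summed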
| 32.78626 | 79 | 0.629686 |
43114d22d6dfaa7a16851e1c11787cf384f8f99e | 1,811 | py | Python |
mandelbrot/mandelbrot.py | ppai22/math_plots | 16ca218e050ed3e3670c2f6bb75762a4c24753d2 | ["MIT"] | null | null | null |
mandelbrot/mandelbrot.py | ppai22/math_plots | 16ca218e050ed3e3670c2f6bb75762a4c24753d2 | ["MIT"] | null | null | null |
mandelbrot/mandelbrot.py | ppai22/math_plots | 16ca218e050ed3e3670c2f6bb75762a4c24753d2 | ["MIT"] | null | null | null |
import seaborn as sns
import numpy as np
class Complex:
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __str__(self):
return f"{self.real} + i{self.imag}"
def square(self):
_real = (self.real ** 2) - (self.imag ** 2)
_imag = 2 * self.real * self.imag
return Complex(_real, _imag)
def plus(self, c2):
return Complex(self.real + c2.real, self.imag + c2.imag)
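# Quick illustration of the helper's arithmetic (the expected printed output
# is shown in the trailing comments):
#
#     c = Complex(1, 2)              # 1 + 2i
#     print(c.square())              # "-3 + i4", since (1 + 2i)^2 = -3 + 4i
#     print(c.square().plus(c))      # "-2 + i6"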
class Mandelbrot:
def __init__(self, c, curr=Complex(0, 0)):
assert isinstance(c, Complex), f"{c} is not a valid input. Input needs to be of type class Complex"
assert isinstance(curr, Complex), f"{curr} is not a valid input. Input needs to be of type class Complex"
self.c = c
self.curr = curr
self.next = self.curr.square().plus(self.c)
def __str__(self):
return f"{self.curr.real} + i{self.curr.imag}"
def next_val(self):
return Mandelbrot(self.c, self.next)
def is_stable(self):
return (self.curr.real ** 2 + self.curr.imag ** 2) <= 4
def plot_mandelbrot(resolution=0.005, max_iter=200):
pts = []
for x in np.arange(-2, 2, resolution):
for y in np.arange(-2, 2, resolution):
c = Complex(x, y)
mandelbrot_val = Mandelbrot(c)
for iter in range(1, max_iter+1):
_next_val = mandelbrot_val.next_val()
if not _next_val.is_stable():
break
mandelbrot_val = _next_val
if iter == max_iter and _next_val.is_stable():  # only keep points that stayed bounded
pts.append(c)
x_val = [item.real for item in pts]
y_val = [item.imag for item in pts]
sns.set(rc={'figure.figsize':(12, 12)})
sns.scatterplot(x=x_val, y=y_val)
plot_mandelbrot()
| 28.746032 | 113 | 0.567642 |
db75ee597da1da6a6313e5edad46e5b2e467fec8 | 19,695 | py | Python |
devilry/devilry_admin/views/assignment/students/delete_groups.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | ["BSD-3-Clause"] | null | null | null |
devilry/devilry_admin/views/assignment/students/delete_groups.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | ["BSD-3-Clause"] | null | null | null |
devilry/devilry_admin/views/assignment/students/delete_groups.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from crispy_forms import layout
from django import forms
from django.contrib import messages
from django.db import models
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy, pgettext_lazy
from django.views.generic import TemplateView
from django_cradmin import crapp
from django_cradmin.crinstance import reverse_cradmin_url
from django_cradmin.crispylayouts import CradminFormHelper, PrimarySubmit
from devilry.apps.core.models import Candidate, Assignment, AssignmentGroup
from devilry.devilry_admin.cradminextensions.listbuilder import listbuilder_assignmentgroup
from devilry.devilry_admin.views.assignment.students import groupview_base
from django_cradmin.viewhelpers import listbuilder
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_cradmin import devilry_listfilter
class AbstractDeleteMethodLinkFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
valuealias = 'assignment'
def get_extra_css_classes_list(self):
return ['devilry-admin-assignment-students-overview-group-linkframe']
class FromAssignmentDeleteMethodLinkFrame(AbstractDeleteMethodLinkFrame):
def __init__(self, current_assignment, **kwargs):
self.current_assignment = current_assignment
super(FromAssignmentDeleteMethodLinkFrame, self).__init__(**kwargs)
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin_assignmentadmin',
appname='delete_groups',
roleid=self.current_assignment.id,
viewname='confirm_delete',
kwargs={'from_assignment_id': self.assignment.id}
)
class ManuallyDeleteMethodLinkFrame(AbstractDeleteMethodLinkFrame):
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin_assignmentadmin',
appname='delete_groups',
roleid=self.assignment.id,
viewname='manual_select'
)
class ChooseManualSelectItemValue(listbuilder.itemvalue.TitleDescription):
template_name = 'devilry_admin/assignment/students/delete_groups/choose-period-item-value.django.html'
def get_extra_css_classes_list(self):
return ['devilry-django-cradmin-listbuilder-itemvalue-titledescription-lg']
def get_title(self):
return pgettext_lazy('admin delete_groups', 'Manually')
def get_description(self):
return pgettext_lazy('admin delete_groups',
'You can delete some students on this assignment, or all of them')
class ChooseAssignmentItemValue(listbuilder.itemvalue.TitleDescription):
valuealias = 'assignment'
template_name = 'devilry_admin/assignment/students/delete_groups/choose-assignment-item-value.django.html'
def get_title(self):
return pgettext_lazy('admin delete_groups',
'Students that failed %(assignment)s') % {
'assignment': self.assignment.long_name
}
def get_description(self):
return pgettext_lazy('admin delete_groups',
'Delete students that did not pass %(assignment)s') % {
'assignment': self.assignment.long_name
}
class ChooseMethod(TemplateView):
template_name = 'devilry_admin/assignment/students/delete_groups/choose-method.django.html'
def get_pagetitle(self):
assignment = self.request.cradmin_role
return pgettext_lazy('admin delete_group', 'Delete students from %(assignment)s') % {
'assignment': assignment.get_path()
}
def get_pageheading(self):
assignment = self.request.cradmin_role
return pgettext_lazy('admin delete_group', 'Delete students from %(assignment)s') % {
'assignment': assignment.long_name
}
def get_page_subheading(self):
return pgettext_lazy('admin delete_group',
'Please select how you would like to delete students. You will get a '
'preview of your choice on the next page before any students are deleted.')
def dispatch(self, request, *args, **kwargs):
self.assignment = self.request.cradmin_role
self.period = self.assignment.parentnode
return super(ChooseMethod, self).dispatch(request, *args, **kwargs)
def __make_listbuilder_list(self):
current_assignment = self.request.cradmin_role
listbuilder_list = listbuilder.lists.RowList()
listbuilder_list.append(ManuallyDeleteMethodLinkFrame(
ChooseManualSelectItemValue(value=current_assignment)))
assignments = self.period.assignments\
.order_by('-publishing_time')\
.exclude(pk=self.assignment.pk)
for assignment in assignments:
listbuilder_list.append(
FromAssignmentDeleteMethodLinkFrame(
current_assignment=current_assignment,
inneritem=ChooseAssignmentItemValue(value=assignment)))
return listbuilder_list
def get_context_data(self, **kwargs):
context = super(ChooseMethod, self).get_context_data(**kwargs)
context['listbuilder_list'] = self.__make_listbuilder_list()
context['pagetitle'] = self.get_pagetitle()
context['pageheading'] = self.get_pageheading()
context['page_subheading'] = self.get_page_subheading()
return context
class DeleteGroupsTargetRenderer(devilry_listbuilder.assignmentgroup.GroupTargetRenderer):
def get_submit_button_text(self):
return ugettext_lazy('Delete students')
def get_with_items_title(self):
return ugettext_lazy('Delete the following students:')
class DeleteGroupsView(groupview_base.BaseMultiselectView):
filterview_name = 'filter'
template_name = 'devilry_admin/assignment/students/delete_groups.django.html'
def add_filterlist_items(self, filterlist):
if self.has_delete_with_content_permission():
super(DeleteGroupsView, self).add_filterlist_items(filterlist=filterlist)
else:
filterlist.append(devilry_listfilter.assignmentgroup.SearchNotAnonymous())
filterlist.append(devilry_listfilter.assignmentgroup.OrderByNotAnonymous())
filterlist.append(devilry_listfilter.assignmentgroup.ExaminerFilter(view=self))
filterlist.append(devilry_listfilter.assignmentgroup.ExaminerCountFilter())
filterlist.append(devilry_listfilter.assignmentgroup.ActivityFilter())
def get_status_filter_value(self):
return 'all'
def get_target_renderer_class(self):
return DeleteGroupsTargetRenderer
def has_delete_with_content_permission(self):
return self.request.cradmin_instance.get_devilryrole_for_requestuser() == 'departmentadmin'
def get_unfiltered_queryset_for_role(self, role):
queryset = super(DeleteGroupsView, self) \
.get_unfiltered_queryset_for_role(role=role)
if self.has_delete_with_content_permission():
return queryset
else:
return queryset\
.exclude(cached_data__public_student_comment_count__gt=0)\
.exclude(cached_data__public_examiner_comment_count__gt=0)\
.exclude(cached_data__last_published_feedbackset__isnull=False)
def get_context_data(self, **kwargs):
context = super(DeleteGroupsView, self).get_context_data(**kwargs)
context['has_delete_with_content_permission'] = self.has_delete_with_content_permission()
return context
def get_success_message(self, candidatecount):
return ugettext_lazy('Deleted %(count)s students from this assignment.') % {
'count': candidatecount
}
def __count_candidates_in_assignmentgroups(self, groupqueryset):
return Candidate.objects\
.filter(assignment_group__in=groupqueryset)\
.count()
def get_success_url(self):
return self.request.cradmin_instance.appindex_url(appname='delete_groups')
def form_valid(self, form):
groupqueryset = form.cleaned_data['selected_items']
candidatecount = self.__count_candidates_in_assignmentgroups(
groupqueryset=groupqueryset)
groupqueryset.delete()
messages.success(self.request, self.get_success_message(candidatecount=candidatecount))
return super(DeleteGroupsView, self).form_valid(form=form)
class SelectedAssignmentGroupsForm(forms.Form):
selected_items = forms.ModelMultipleChoiceField(
widget=forms.MultipleHiddenInput,
queryset=AssignmentGroup.objects.none())
def __init__(self, *args, **kwargs):
assignmentgroup_queryset = kwargs.pop('assignmentgroup_queryset')
super(SelectedAssignmentGroupsForm, self).__init__(*args, **kwargs)
self.fields['selected_items'].queryset = assignmentgroup_queryset
class BulkSelectionDeleteGroupsViewMixin(object):
"""
    Mixin class for deleting a set of :class:`~.devilry.core.apps.models.assignment_group.AssignmentGroup` objects.
    The set of ``AssignmentGroup`` objects to delete is submitted as form data, and that is the set that gets deleted.
    Subclasses decide which querysets of groups can be deleted.
"""
form_invalid_message = pgettext_lazy(
'admin delete_groups',
        'Oops! Something went wrong. This may happen if someone edited '
'students on the assignment or the semester while you were making '
'your selection. Please try again.')
def dispatch(self, request, *args, **kwargs):
self.assignment = request.cradmin_role
self.period = self.assignment.parentnode
return super(BulkSelectionDeleteGroupsViewMixin, self).dispatch(request, *args, **kwargs)
def get_unfiltered_queryset_for_role(self, role):
return AssignmentGroup.objects.none()
def get_form_class(self):
return SelectedAssignmentGroupsForm
def get_form(self):
form_class = self.get_form_class()
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
kwargs = {
'assignmentgroup_queryset': self.get_unfiltered_queryset_for_role(role=self.request.cradmin_role)
}
if self.request.method == 'POST':
kwargs['data'] = self.request.POST
return kwargs
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def get_success_url(self):
return self.request.cradmin_instance.appindex_url('studentoverview')
def form_valid(self, form):
selected_assignment_groups = form.cleaned_data['selected_items']
selected_assignment_groups_count = selected_assignment_groups.count()
selected_assignment_groups.delete()
success_message = self.get_success_message(delete_group_count=selected_assignment_groups_count)
if success_message:
messages.success(request=self.request, message=success_message)
return redirect(self.get_success_url())
def get_error_url(self):
return self.request.get_full_path()
def get_success_message(self, delete_group_count):
return None
def form_invalid(self, form):
messages.error(self.request, self.form_invalid_message)
return redirect(self.get_error_url())
class ConfirmView(BulkSelectionDeleteGroupsViewMixin,
listbuilder_assignmentgroup.VerticalFilterListView):
"""
Confirmation view listing :class:`~.devilry.core.apps.models.assignment_group.AssignmentGroup` that will be
deleted. The user confirms the deletion here, and it can not be undone.
Groups with any activity, or groups where the last :class:`~.devilry.devilry_group.models.FeedbackSet` is not
published will not be deleted.
Only groups that fulfill these criteria can be deleted:
- No public student comments (comments, uploads, etc)
- No public examiner comments (comments, uploads, etc)
- Cached data last feedbackset grading_published_datetime is not NULL
- Cached data last feedbackset grading_points is less than the assignments minimum points to pass.
    - The :class:`~.devilry.apps.core.models.relateduser.RelatedStudent` must be in both the current assignment
and the previous assignment (selected before the confirm view).
- Only groups on the current assignment with ONE student.
Note::
        If a group on the current assignment has multiple students (a project group), it will NOT be deleted,
        even if one of its members failed the selected assignment.
"""
# value_renderer_class = listbuilder_assignmentgroup.AssignmentGroupItemValueTitleDescription
def dispatch(self, request, *args, **kwargs):
try:
self.from_assignment = Assignment.objects.get(id=kwargs.get('from_assignment_id'))
except Assignment.DoesNotExist:
raise Http404()
return super(ConfirmView, self).dispatch(request, *args, **kwargs)
def get_pagetitle(self):
return pgettext_lazy(
'admin delete_groups',
'Confirm that you want to delete students that failed %(from_assignment_name)s') % {
'from_assignment_name': self.from_assignment.long_name
}
def get_pageheading(self):
return pgettext_lazy(
'admin delete_groups',
'Confirm that you want to delete students that failed %(from_assignment_name)s') % {
'from_assignment_name': self.from_assignment.long_name
}
def get_value_renderer_class(self):
devilryrole = self.request.cradmin_instance.get_devilryrole_for_requestuser()
if devilryrole == 'departmentadmin':
return devilry_listbuilder.assignmentgroup.MinimalDepartmentAdminItemValue
elif devilryrole == 'subjectadmin':
if self.assignment.anonymizationmode == Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS:
return devilry_listbuilder.assignmentgroup.FullyAnonymousSubjectAdminItemValue
else:
return devilry_listbuilder.assignmentgroup.MinimalSubjectAdminItemValue
elif devilryrole == 'periodadmin':
return devilry_listbuilder.assignmentgroup.MinimalPeriodAdminItemValue
else:
raise ValueError('Invalid devilryrole: {}'.format(devilryrole))
def get_value_and_frame_renderer_kwargs(self):
return {
'assignment': self.request.cradmin_role
}
def get_period(self):
return self.assignment.period
def get_filterlist_template_name(self):
return 'devilry_admin/assignment/students/delete_groups/confirm.django.html'
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
viewname='confirm_delete',
kwargs={
'from_assignment_id': self.from_assignment.id,
'filters_string': filters_string
})
def __failed_from_group_queryset(self):
"""
Get all `AssignmentGroup`s that failed the selected earlier assignment.
"""
return AssignmentGroup.objects\
.filter(parentnode_id=self.from_assignment.id)\
.filter(cached_data__last_feedbackset__grading_published_datetime__isnull=False)\
.filter(cached_data__last_feedbackset__grading_points__lt=self.from_assignment.passing_grade_min_points)
def get_unfiltered_queryset_for_role(self, role):
"""
        Find all `AssignmentGroup`s on the current assignment where the students were also in groups that
failed the selected earlier assignment.
"""
failed_group_ids = self.__failed_from_group_queryset()\
.values_list('id', flat=True)
relatedstudents_from_failed_groups_ids = Candidate.objects\
.filter(assignment_group_id__in=failed_group_ids)\
.values_list('relatedstudent_id', flat=True)
return AssignmentGroup.objects\
.filter(parentnode_id=role.id)\
.prefetch_related(
models.Prefetch('candidates', queryset=Candidate.objects.select_related(
'assignment_group', 'relatedstudent', 'relatedstudent__user')))\
.filter(candidates__relatedstudent_id__in=relatedstudents_from_failed_groups_ids) \
.filter(
models.Q(cached_data__first_feedbackset=models.F('cached_data__last_feedbackset'))
&
models.Q(cached_data__last_published_feedbackset__isnull=True)) \
.exclude(cached_data__public_student_comment_count__gt=0) \
.exclude(cached_data__public_examiner_comment_count__gt=0) \
.filter(cached_data__candidate_count=1)
def get_form_kwargs(self):
kwargs = super(ConfirmView, self).get_form_kwargs()
if self.request.method == 'GET':
assignmentgroup_queryset = kwargs['assignmentgroup_queryset']
kwargs['initial'] = {
'selected_items': assignmentgroup_queryset.values_list('id', flat=True),
}
return kwargs
def __get_formhelper(self):
helper = CradminFormHelper()
helper.form_class = 'django-cradmin-form-wrapper devilry-django-cradmin-form-wrapper-top-bottom-spacing'
helper.form_id = 'devilry_admin_delete_groups_confirm_form'
helper.layout = layout.Layout(
'selected_items',
PrimarySubmit('delete_groups', pgettext_lazy('admin delete_groups', 'Delete students'))
)
helper.form_action = self.request.get_full_path()
return helper
def __get_total_candidate_count(self):
return Candidate.objects\
.filter(assignment_group__parentnode=self.request.cradmin_role)\
.count()
def get_success_message(self, delete_group_count):
return pgettext_lazy('admin delete groups confirm',
'Successfully deleted %(delete_group_count)s students from the assignment') % {
'delete_group_count': delete_group_count
}
def get_context_data(self, **kwargs):
context = super(ConfirmView, self).get_context_data(**kwargs)
context['from_assignment'] = self.from_assignment
context['no_groups_found'] = not self.get_unfiltered_queryset_for_role(
role=self.request.cradmin_role).exists()
num_candidates_total = self.__get_total_candidate_count()
num_to_be_deleted = self.get_unfiltered_queryset_for_role(
role=self.request.cradmin_role).count()
context['num_candidates_total'] = num_candidates_total
context['num_to_be_deleted'] = num_to_be_deleted
context['num_excluded'] = num_candidates_total - num_to_be_deleted
context['formhelper'] = self.__get_formhelper()
context['form'] = self.get_form()
return context
class App(crapp.App):
appurls = [
crapp.Url(r'^$',
ChooseMethod.as_view(),
name=crapp.INDEXVIEW_NAME
),
crapp.Url(r'^manual-select$',
DeleteGroupsView.as_view(),
name='manual_select'),
crapp.Url(r'^manual-select/filter/(?P<filters_string>.+)?$',
DeleteGroupsView.as_view(),
name='filter'),
crapp.Url(r'^confirm/(?P<from_assignment_id>\d+)/(?P<filters_string>.+)?$',
ConfirmView.as_view(),
name='confirm_delete'),
]
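# Flow sketch (inferred from the URL patterns above): the index view (ChooseMethod) lets the admin
# pick either manual selection (-> DeleteGroupsView via "manual-select", optionally filtered) or a
# previous assignment (-> ConfirmView via "confirm/<from_assignment_id>"), where the deletion is
# previewed, confirmed and executed.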
| 43.09628 | 116 | 0.696776 |
0d405dfc2e1b0c4e47f54f2c31d4f34a1beb38f5
| 1,056 |
py
|
Python
|
DailyFrsh/DailyFrsh/urls.py
|
GSFNE/b-b-b
|
6004306a752cefc35956cc20da1af4e47168666e
|
[
"MIT"
] | null | null | null |
DailyFrsh/DailyFrsh/urls.py
|
GSFNE/b-b-b
|
6004306a752cefc35956cc20da1af4e47168666e
|
[
"MIT"
] | null | null | null |
DailyFrsh/DailyFrsh/urls.py
|
GSFNE/b-b-b
|
6004306a752cefc35956cc20da1af4e47168666e
|
[
"MIT"
] | null | null | null |
"""DailyFrsh URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)), # site administration
    url(r'^user/', include('apps.user.urls', namespace='user')), # user module
    url(r'^cart/', include('apps.cart.urls', namespace='cart')), # shopping cart module
    url(r'^order/', include('apps.order.urls', namespace='order')), # order module
    url(r'^', include('apps.goods.urls', namespace='goods')), # goods module
]
| 40.615385 | 77 | 0.679924 |
2961d5c94cf5709954437a02525afe1b0885b91f
| 6,662 |
py
|
Python
|
src/transformers/models/herbert/tokenization_herbert_fast.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 50,404 |
2019-09-26T09:55:55.000Z
|
2022-03-31T23:07:49.000Z
|
src/transformers/models/herbert/tokenization_herbert_fast.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 13,179 |
2019-09-26T10:10:57.000Z
|
2022-03-31T23:17:08.000Z
|
src/transformers/models/herbert/tokenization_herbert_fast.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 13,337 |
2019-09-26T10:49:38.000Z
|
2022-03-31T23:06:17.000Z
|
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's `tokenizers` library).
Peculiarities:
- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
a punctuation character will be treated separately.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = HerbertTokenizer
def __init__(
self,
vocab_file=None,
merges_file=None,
tokenizer_file=None,
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sep_token="</s>",
**kwargs
):
super().__init__(
vocab_file,
merges_file,
tokenizer_file=tokenizer_file,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
sep_token=sep_token,
**kwargs,
)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
cls = [self.cls_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A HerBERT
        sequence pair mask, like BERT's, has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
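# A minimal, self-contained sketch of the special-token layout documented in the docstrings above
# (<s> X </s> and <s> A </s> B </s>). The ids below are made-up placeholders, not real HerBERT
# vocabulary ids; exercising the actual tokenizer would require the pretrained vocab/merges files.
if __name__ == "__main__":
    cls, sep = [0], [2]                # hypothetical ids for <s> and </s>
    token_ids_0 = [11, 12, 13]         # hypothetical ids for sequence A
    token_ids_1 = [21, 22]             # hypothetical ids for sequence B
    single = cls + token_ids_0 + sep
    pair = cls + token_ids_0 + sep + token_ids_1 + sep
    token_types = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    print(single)       # [0, 11, 12, 13, 2]
    print(pair)         # [0, 11, 12, 13, 2, 21, 22, 2]
    print(token_types)  # [0, 0, 0, 0, 0, 1, 1, 1]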
| 37.852273 | 119 | 0.64305 |
90dab899a1f87f6e81eabbcfd86b3197c3ea0dee
| 6,479 |
py
|
Python
|
venv/lib/python2.7/dist-packages/landscape/monitor/computerinfo.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/dist-packages/landscape/monitor/computerinfo.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/dist-packages/landscape/monitor/computerinfo.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
import os
import logging
from twisted.internet.defer import inlineCallbacks, returnValue
from landscape.lib.fetch import fetch_async
from landscape.lib.fs import read_file
from landscape.lib.lsb_release import LSB_RELEASE_FILENAME, parse_lsb_release
from landscape.lib.cloud import fetch_ec2_meta_data
from landscape.lib.network import get_fqdn
from landscape.monitor.plugin import MonitorPlugin
METADATA_RETRY_MAX = 3 # Number of retries to get EC2 meta-data
class DistributionInfoError(Exception):
pass
class ComputerInfo(MonitorPlugin):
"""Plugin captures and reports basic computer information."""
persist_name = "computer-info"
scope = "computer"
def __init__(self, get_fqdn=get_fqdn,
meminfo_filename="/proc/meminfo",
lsb_release_filename=LSB_RELEASE_FILENAME,
root_path="/", fetch_async=fetch_async):
self._get_fqdn = get_fqdn
self._meminfo_filename = meminfo_filename
self._lsb_release_filename = lsb_release_filename
self._root_path = root_path
self._cloud_instance_metadata = None
self._cloud_retries = 0
self._fetch_async = fetch_async
def register(self, registry):
super(ComputerInfo, self).register(registry)
self._annotations_path = registry.config.annotations_path
self.call_on_accepted("computer-info",
self.send_computer_message, True)
self.call_on_accepted("distribution-info",
self.send_distribution_message, True)
self.call_on_accepted("cloud-instance-metadata",
self.send_cloud_instance_metadata_message, True)
def send_computer_message(self, urgent=False):
message = self._create_computer_info_message()
if message:
message["type"] = "computer-info"
logging.info("Queueing message with updated computer info.")
self.registry.broker.send_message(message, self._session_id,
urgent=urgent)
def send_distribution_message(self, urgent=False):
message = self._create_distribution_info_message()
if message:
message["type"] = "distribution-info"
logging.info("Queueing message with updated distribution info.")
self.registry.broker.send_message(message, self._session_id,
urgent=urgent)
@inlineCallbacks
def send_cloud_instance_metadata_message(self, urgent=False):
message = yield self._create_cloud_instance_metadata_message()
if message:
message["type"] = "cloud-instance-metadata"
logging.info("Queueing message with updated cloud instance "
"metadata.")
self.registry.broker.send_message(message, self._session_id,
urgent=urgent)
def exchange(self, urgent=False):
broker = self.registry.broker
broker.call_if_accepted("computer-info",
self.send_computer_message, urgent)
broker.call_if_accepted("distribution-info",
self.send_distribution_message, urgent)
broker.call_if_accepted("cloud-instance-metadata",
self.send_cloud_instance_metadata_message,
urgent)
def _create_computer_info_message(self):
message = {}
self._add_if_new(message, "hostname", self._get_fqdn())
total_memory, total_swap = self._get_memory_info()
self._add_if_new(message, "total-memory", total_memory)
self._add_if_new(message, "total-swap", total_swap)
annotations = {}
if os.path.exists(self._annotations_path):
for key in os.listdir(self._annotations_path):
annotations[key] = read_file(
os.path.join(self._annotations_path, key))
if annotations:
self._add_if_new(message, "annotations", annotations)
return message
def _add_if_new(self, message, key, value):
if value != self._persist.get(key):
self._persist.set(key, value)
message[key] = value
def _create_distribution_info_message(self):
message = self._get_distribution_info()
if message != self._persist.get("distribution-info"):
self._persist.set("distribution-info", message)
return message
return None
def _get_memory_info(self):
"""Get details in megabytes and return a C{(memory, swap)} tuple."""
message = {}
        with open(self._meminfo_filename) as meminfo_file:
            for line in meminfo_file:
                if line != '\n':
                    parts = line.split(":")
                    key = parts[0]
                    if key in ["MemTotal", "SwapTotal"]:
                        value = int(parts[1].strip().split(" ")[0])
                        message[key] = value
return (message["MemTotal"] // 1024, message["SwapTotal"] // 1024)
def _get_distribution_info(self):
"""Get details about the distribution."""
message = {}
message.update(parse_lsb_release(self._lsb_release_filename))
return message
@inlineCallbacks
def _create_cloud_instance_metadata_message(self):
"""Fetch cloud metadata and insert it in a message."""
message = None
if (self._cloud_instance_metadata is None and
self._cloud_retries < METADATA_RETRY_MAX):
self._cloud_instance_metadata = yield self._fetch_ec2_meta_data()
message = self._cloud_instance_metadata
returnValue(message)
def _fetch_ec2_meta_data(self):
"""Fetch information about the cloud instance."""
if self._cloud_retries == 0:
logging.info("Querying cloud meta-data.")
deferred = fetch_ec2_meta_data(self._fetch_async)
def log_no_meta_data_found(error):
self._cloud_retries += 1
if self._cloud_retries >= METADATA_RETRY_MAX:
logging.info("No cloud meta-data available. %s" %
error.getErrorMessage())
def log_success(result):
logging.info("Acquired cloud meta-data.")
return result
deferred.addCallback(log_success)
deferred.addErrback(log_no_meta_data_found)
return deferred
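# A small standalone sketch of the /proc/meminfo parsing performed by ComputerInfo._get_memory_info():
# keep MemTotal/SwapTotal (reported in kB) and convert them to megabytes. The sample text below is
# made up for illustration and is not a real meminfo dump.
if __name__ == "__main__":
    sample = ("MemTotal:       2048000 kB\n"
              "MemFree:         512000 kB\n"
              "SwapTotal:      1024000 kB\n")
    totals = {}
    for meminfo_line in sample.splitlines():
        if meminfo_line.strip():
            key, _, rest = meminfo_line.partition(":")
            if key in ["MemTotal", "SwapTotal"]:
                totals[key] = int(rest.strip().split(" ")[0])
    print("total-memory=%d MB, total-swap=%d MB"
          % (totals["MemTotal"] // 1024, totals["SwapTotal"] // 1024))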
| 40.49375 | 78 | 0.629727 |
868689b3f72ea542f96bc1b89fb5369c67512ee1
| 2,072 |
py
|
Python
|
reverse_nodes_in_k_group.py
|
KevinLuo41/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | 19 |
2015-01-19T19:36:09.000Z
|
2020-03-18T03:10:12.000Z
|
reverse_nodes_in_k_group.py
|
CodingVault/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | null | null | null |
reverse_nodes_in_k_group.py
|
CodingVault/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | 12 |
2015-04-25T14:20:38.000Z
|
2020-09-27T04:59:59.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
reverse_nodes_in_k_group.py
Created by Shengwei on 2014-07-26.
"""
# https://oj.leetcode.com/problems/reverse-nodes-in-k-group/
# tags: medium, linked-list, pointer, dummy head, edge cases, reverse
"""
Given a linked list, reverse its nodes k at a time and return the modified list.
If the number of nodes is not a multiple of k, the left-out nodes at the end should remain as they are.
You may not alter the values in the nodes; only the nodes themselves may be changed.
Only constant memory is allowed.
For example,
Given this linked list: 1->2->3->4->5
For k = 2, you should return: 2->1->4->3->5
For k = 3, you should return: 3->2->1->4->5
"""
# https://oj.leetcode.com/discuss/6113/my-solution-accepted-in-java
# alternative: reverse every two nodes at a time up to k nodes in each outer loop
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @param k, an integer
# @return a ListNode
def reverseKGroup(self, head, k):
pre = dummy_head = ListNode(0)
dummy_head.next = head
while pre.next:
post = pre.next
            # advance post k nodes so it ends up just past this group,
            # or return early if fewer than k nodes remain
for _ in xrange(k):
if post:
post = post.next
else:
return dummy_head.next
# reverse the section between pre and post
pre_cursor = pre.next
cursor = pre_cursor.next
while cursor != post:
post_cursor = cursor.next
cursor.next = pre_cursor
pre_cursor = cursor
cursor = post_cursor
            # reconnect the head and tail of the just-reversed
            # section and reset pre
pre.next.next = post
pre.next, pre = pre_cursor, pre.next
return dummy_head.next
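# A small usage sketch for the solution above. ListNode here is a minimal stand-in for the
# LeetCode-provided class that is only shown commented out near the top of this file.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
if __name__ == "__main__":
    # build 1->2->3->4->5, reverse in groups of k=2, expect 2->1->4->3->5
    head = ListNode(1)
    tail = head
    for value in [2, 3, 4, 5]:
        tail.next = ListNode(value)
        tail = tail.next
    node = Solution().reverseKGroup(head, 2)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # [2, 1, 4, 3, 5]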
| 29.183099 | 100 | 0.591699 |
789bccda1b6a828cf65a61f0caa7e2e3f66cf804
| 2,738 |
py
|
Python
|
python_test/test_parser_addition.py
|
lubkoll/friendly-type-erasure
|
719830233a8652ccf18164653b466b0054a617f6
|
[
"MIT"
] | null | null | null |
python_test/test_parser_addition.py
|
lubkoll/friendly-type-erasure
|
719830233a8652ccf18164653b466b0054a617f6
|
[
"MIT"
] | 22 |
2016-08-03T16:51:10.000Z
|
2016-11-23T20:53:03.000Z
|
python_test/test_parser_addition.py
|
lubkoll/friendly-type-erasure
|
719830233a8652ccf18164653b466b0054a617f6
|
[
"MIT"
] | null | null | null |
import unittest
import type_erasure.parser_addition
single_line_test_comments = ['/// comment',
'//! comment',
'// comment',
' \n\r\t/// comment']
multi_line_test_comments = [['/** comment */'],
['/* comment */'],
['/* comment', '*/'],
['/**', '* comment', '*/']]
class TestIsComment(unittest.TestCase):
def test_is_single_line_comment(self):
for comment in single_line_test_comments:
self.assertTrue(type_erasure.parser_addition.is_single_line_comment(comment))
for comment in multi_line_test_comments:
for line in comment:
self.assertFalse(type_erasure.parser_addition.is_single_line_comment(line))
def test_is_multi_line_comment(self):
for comment in single_line_test_comments:
self.assertFalse(type_erasure.parser_addition.is_multi_line_comment(comment, in_multi_line_comment=False))
self.assertFalse(type_erasure.parser_addition.is_multi_line_comment(comment, in_multi_line_comment=True))
for comment in multi_line_test_comments:
for line in comment:
if line is comment[0]:
self.assertTrue(type_erasure.parser_addition.is_multi_line_comment(line, in_multi_line_comment=False))
self.assertFalse(type_erasure.parser_addition.is_multi_line_comment(line, in_multi_line_comment=True))
else:
self.assertFalse(type_erasure.parser_addition.is_multi_line_comment(line, in_multi_line_comment=False))
self.assertTrue(type_erasure.parser_addition.is_multi_line_comment(line, in_multi_line_comment=True))
def test_is_comment(self):
for comment in single_line_test_comments:
self.assertTrue(type_erasure.parser_addition.is_comment(comment, in_multi_line_comment=False))
self.assertTrue(type_erasure.parser_addition.is_comment(comment, in_multi_line_comment=True))
for comment in multi_line_test_comments:
for line in comment:
if line is comment[0]:
self.assertTrue(type_erasure.parser_addition.is_comment(line, in_multi_line_comment=False))
self.assertFalse(type_erasure.parser_addition.is_comment(line, in_multi_line_comment=True))
else:
self.assertFalse(type_erasure.parser_addition.is_comment(line, in_multi_line_comment=False))
self.assertTrue(type_erasure.parser_addition.is_comment(line, in_multi_line_comment=True))
if __name__ == '__main__':
unittest.main()
| 53.686275 | 123 | 0.65851 |
f6b94120d11fb1b65da5149b175def372599d447
| 9,286 |
py
|
Python
|
tests/profiling/test_profiler.py
|
melancholy/dd-trace-py
|
32d463e5465466bc876c85a45880a84824d9b47c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 308 |
2016-12-07T16:49:27.000Z
|
2022-03-15T10:06:45.000Z
|
tests/profiling/test_profiler.py
|
melancholy/dd-trace-py
|
32d463e5465466bc876c85a45880a84824d9b47c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 |
2016-11-28T17:13:18.000Z
|
2022-03-31T21:43:19.000Z
|
tests/profiling/test_profiler.py
|
melancholy/dd-trace-py
|
32d463e5465466bc876c85a45880a84824d9b47c
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 311 |
2016-11-27T03:01:49.000Z
|
2022-03-18T21:34:03.000Z
|
import logging
import time
import mock
import pytest
import ddtrace
from ddtrace.profiling import collector
from ddtrace.profiling import event
from ddtrace.profiling import exporter
from ddtrace.profiling import profiler
from ddtrace.profiling.collector import stack
from ddtrace.profiling.exporter import http
def test_status():
p = profiler.Profiler()
assert repr(p.status) == "<ServiceStatus.STOPPED: 'stopped'>"
p.start()
assert repr(p.status) == "<ServiceStatus.RUNNING: 'running'>"
p.stop(flush=False)
assert repr(p.status) == "<ServiceStatus.STOPPED: 'stopped'>"
def test_restart():
p = profiler.Profiler()
p.start()
p.stop(flush=False)
p.start()
p.stop(flush=False)
def test_multiple_stop():
"""Check that the profiler can be stopped twice."""
p = profiler.Profiler()
p.start()
p.stop(flush=False)
p.stop(flush=False)
@pytest.mark.parametrize(
"service_name_var",
("DD_SERVICE", "DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"),
)
def test_default_from_env(service_name_var, monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
monkeypatch.setenv(service_name_var, "foobar")
prof = profiler.Profiler()
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.service == "foobar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_service_api(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler(service="foobar")
assert prof.service == "foobar"
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.service == "foobar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_tracer_api(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler(tracer=ddtrace.tracer)
assert prof.tracer == ddtrace.tracer
for col in prof._profiler._collectors:
if isinstance(col, stack.StackCollector):
assert col.tracer == ddtrace.tracer
break
else:
pytest.fail("Unable to find stack collector")
def test_env_default(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
monkeypatch.setenv("DD_ENV", "staging")
monkeypatch.setenv("DD_VERSION", "123")
prof = profiler.Profiler()
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_env_api():
prof = profiler.Profiler(env="staging", version="123")
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_tags_api():
prof = profiler.Profiler(env="staging", version="123", tags={"foo": "bar"})
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
assert prof.tags["foo"] == "bar"
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
assert exp.tags["foo"] == b"bar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_env_agentless(monkeypatch):
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "true")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "https://intake.profile.datadoghq.com", "foobar", endpoint_path="/v1/input")
def test_env_agentless_site(monkeypatch):
monkeypatch.setenv("DD_SITE", "datadoghq.eu")
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "true")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "https://intake.profile.datadoghq.eu", "foobar", endpoint_path="/v1/input")
def test_env_no_agentless(monkeypatch):
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "false")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126", "foobar")
def test_url():
prof = profiler.Profiler(url="https://foobar:123")
_check_url(prof, "https://foobar:123")
def _check_url(prof, url, api_key=None, endpoint_path="profiling/v1/input"):
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.api_key == api_key
assert exp.endpoint == url
assert exp.endpoint_path == endpoint_path
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_default_tracer_and_url():
try:
ddtrace.tracer.configure(hostname="foobar")
prof = profiler.Profiler(url="https://foobaz:123")
_check_url(prof, "https://foobaz:123")
finally:
ddtrace.tracer.configure(hostname="localhost")
def test_tracer_and_url():
t = ddtrace.Tracer()
t.configure(hostname="foobar")
prof = profiler.Profiler(tracer=t, url="https://foobaz:123")
_check_url(prof, "https://foobaz:123")
def test_tracer_url():
t = ddtrace.Tracer()
t.configure(hostname="foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "http://foobar:8126")
def test_tracer_url_https():
t = ddtrace.Tracer()
t.configure(hostname="foobar", https=True)
prof = profiler.Profiler(tracer=t)
_check_url(prof, "https://foobar:8126")
def test_tracer_url_uds_hostname():
t = ddtrace.Tracer()
t.configure(hostname="foobar", uds_path="/foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "unix://foobar/foobar")
def test_tracer_url_uds():
t = ddtrace.Tracer()
t.configure(uds_path="/foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "unix:///foobar")
def test_env_no_api_key():
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126")
def test_env_endpoint_url(monkeypatch):
monkeypatch.setenv("DD_AGENT_HOST", "foobar")
monkeypatch.setenv("DD_TRACE_AGENT_PORT", "123")
t = ddtrace.Tracer()
prof = profiler.Profiler(tracer=t)
_check_url(prof, "http://foobar:123")
def test_env_endpoint_url_no_agent(monkeypatch):
monkeypatch.setenv("DD_SITE", "datadoghq.eu")
monkeypatch.setenv("DD_API_KEY", "123")
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126", "123")
def test_copy():
p = profiler._ProfilerInstance(env="123", version="dwq", service="foobar")
c = p.copy()
assert c == p
assert p.env == c.env
assert p.version == c.version
assert p.service == c.service
assert p.tracer == c.tracer
assert p.tags == c.tags
def test_snapshot(monkeypatch):
class SnapCollect(collector.Collector):
@staticmethod
def collect():
pass
@staticmethod
def snapshot():
return [[event.Event()]]
def _start_service(self):
pass
def _stop_service(self):
pass
all_events = {}
class Exporter(exporter.Exporter):
def export(self, events, *args, **kwargs):
all_events["EVENTS"] = events
class TestProfiler(profiler._ProfilerInstance):
def _build_default_exporters(self, *args, **kargs):
return [Exporter()]
monkeypatch.setenv("DD_PROFILING_UPLOAD_INTERVAL", "1")
p = TestProfiler()
p._collectors = [SnapCollect(p._recorder)]
p.start()
time.sleep(2)
p.stop()
assert len(all_events["EVENTS"][event.Event]) == 1
def test_failed_start_collector(caplog, monkeypatch):
class ErrCollect(collector.Collector):
def _start_service(self):
raise RuntimeError("could not import required module")
def _stop_service(self):
pass
@staticmethod
def collect():
pass
@staticmethod
def snapshot():
raise Exception("error!")
monkeypatch.setenv("DD_PROFILING_UPLOAD_INTERVAL", "1")
class Exporter(exporter.Exporter):
def export(self, events, *args, **kwargs):
pass
class TestProfiler(profiler._ProfilerInstance):
def _build_default_exporters(self, *args, **kargs):
return [Exporter()]
p = TestProfiler()
err_collector = mock.MagicMock(wraps=ErrCollect(p._recorder))
p._collectors = [err_collector]
p.start()
assert caplog.record_tuples == [
(("ddtrace.profiling.profiler", logging.ERROR, "Failed to start collector %r, disabling." % err_collector))
]
time.sleep(2)
p.stop()
assert err_collector.snapshot.call_count == 0
assert caplog.record_tuples == [
(("ddtrace.profiling.profiler", logging.ERROR, "Failed to start collector %r, disabling." % err_collector))
]
| 29.479365 | 115 | 0.659595 |
c82309d9609ddd9518b00637b1dd2c147f96edc1
| 3,410 |
py
|
Python
|
tests/test_header.py
|
SilverLabUCL/PySilverLabNWB
|
358c551192cc77ad0fb7070d42424785f1010b14
|
[
"MIT"
] | 1 |
2020-05-27T09:51:28.000Z
|
2020-05-27T09:51:28.000Z
|
tests/test_header.py
|
SilverLabUCL/PySilverLabNWB
|
358c551192cc77ad0fb7070d42424785f1010b14
|
[
"MIT"
] | 73 |
2018-10-12T16:37:49.000Z
|
2021-04-14T14:36:22.000Z
|
tests/test_header.py
|
SilverLabUCL/PySilverLabNWB
|
358c551192cc77ad0fb7070d42424785f1010b14
|
[
"MIT"
] | null | null | null |
"""Unit style tests for reading from various LabView headers (.ini files)"""
import os
import pytest
from silverlabnwb.header import LabViewHeader, LabViewVersions, Modes
@pytest.fixture(scope="module")
def header(request, ref_data_dir):
"""Create header object from a LabView header file."""
header_file = os.path.join(ref_data_dir, request.param)
header_object = LabViewHeader.from_file(header_file)
return header_object
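# Note on the parametrization below: the tests pass header file names through pytest's
# ``indirect=["header"]`` mechanism, so each string ends up as ``request.param`` in the fixture
# above and is converted into a LabViewHeader object before the test body runs.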
class TestLabViewHeaders(object):
synthetic_header_path_v231 = 'synthetic experiment Header v231.ini'
synthetic_header_path_v231_no_last_time = 'synthetic experiment Header v231 no last time.ini'
synthetic_header_path_pre2018 = 'Experiment Header.ini'
header_with_unrecognised_line_path = 'unrecognised line Header.ini'
real_life_header_path_v231_pointing = 'real life Experiment Header v231 pointing.ini'
@pytest.mark.parametrize("header, expected_version",
[(synthetic_header_path_v231, LabViewVersions.v231),
(synthetic_header_path_pre2018, LabViewVersions.pre2018),
(real_life_header_path_v231_pointing, LabViewVersions.v231)],
indirect=["header"])
def test_lab_view_version(self, header, expected_version):
assert header.version == expected_version
@pytest.mark.parametrize("header, expected_mode",
[(synthetic_header_path_v231, Modes.miniscan),
(synthetic_header_path_pre2018, Modes.pointing),
(real_life_header_path_v231_pointing, Modes.pointing)],
indirect=["header"])
def test_imaging_mode(self, header, expected_mode):
assert header.imaging_mode == expected_mode
@pytest.mark.parametrize("header, expected_trial_times",
[(synthetic_header_path_v231, [(0.0, 12.345678), (12.567890, 23.456789)]),
(synthetic_header_path_v231_no_last_time, [(0.0, 12.345678), (12.567890, None)])],
indirect=["header"])
def test_trial_times(self, header, expected_trial_times):
assert header.determine_trial_times() == expected_trial_times
@pytest.mark.parametrize("header, expected_number_of_trials",
[(synthetic_header_path_v231, 2),
(synthetic_header_path_v231_no_last_time, 2),
(real_life_header_path_v231_pointing, 29)],
indirect=["header"])
def test_number_of_trials(self, header, expected_number_of_trials):
assert len(header.determine_trial_times()) == expected_number_of_trials
@pytest.mark.parametrize("header",
[synthetic_header_path_pre2018],
indirect=["header"])
def test_pre2018_trial_times_raises_error(self, header):
with pytest.raises(NotImplementedError):
header.determine_trial_times()
def test_unrecognised_line_causes_warning(self):
with pytest.warns(UserWarning) as list_of_warnings:
LabViewHeader.from_file(os.path.join("tests", "data", self.header_with_unrecognised_line_path))
assert len(list_of_warnings) == 1
assert str(list_of_warnings[0].message).startswith("Unrecognised non-blank line")
| 50.895522 | 112 | 0.660411 |
247a25aca621c151a32c285f2f95eeed022f35f2
| 3,393 |
py
|
Python
|
main.py
|
sanathana-dharma/sadha
|
a8e1712c700e710c51d2e6ac2981f0c6df33d17f
|
[
"Unlicense"
] | 1 |
2019-07-16T21:40:00.000Z
|
2019-07-16T21:40:00.000Z
|
main.py
|
sanathana-dharma/sadha
|
a8e1712c700e710c51d2e6ac2981f0c6df33d17f
|
[
"Unlicense"
] | 15 |
2019-08-14T05:37:57.000Z
|
2019-11-11T14:07:51.000Z
|
main.py
|
sanathana-dharma/sadha
|
a8e1712c700e710c51d2e6ac2981f0c6df33d17f
|
[
"Unlicense"
] | null | null | null |
import os
import logging
from flask import Flask, redirect, request, url_for, make_response
from flask_login import (
LoginManager,
current_user
)
from _private import keys
import search
import requests
# Internal imports
from user import User
import config
import treemgr
import contentmgr
import search
import auth
import utils
from utils import render_html
#Search related imports
from _private import keys
import requests
from algoliasearch.search_client import SearchClient
#Elastic
from elasticsearch_dsl import Q
# Custom Flask subclass to override the Jinja template variable delimiters ({{ }} -> [[ ]]) because {{ }} is used by Algolia
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='[[', # Default is '{{', I'm changing this because Vue.js uses '{{' / '}}'
variable_end_string=']]',
))
# =========================================================================
# Flask app setup
# Below line replaces "app = Flask(__name__)"
app = CustomFlask(__name__, template_folder='static/templates')
app.config.from_object(config)
app.secret_key = keys.SECRET_KEY
app.debug = True
app.testing = False
if not app.testing:
logging.basicConfig(level=logging.INFO)
# Register the blueprints.
from treemgr.routes import mod
app.register_blueprint(treemgr.routes.mod, url_prefix='/admin/treemgr')
from auth.routes import mod2
app.register_blueprint(auth.routes.mod2, url_prefix='/auth')
from contentmgr.routes import mod3
app.register_blueprint(contentmgr.routes.mod3, url_prefix='/admin/contentmgr')
from search.routes import mod4
app.register_blueprint(search.routes.mod4, url_prefix='/admin/search')
# User session management setup
login_manager = LoginManager()
login_manager.init_app(app)
# Flask-Login helper to retrieve the current user from db
@login_manager.user_loader
def load_user(user_id):
try:
return User.get(user_id)
except:
return None
@login_manager.unauthorized_handler
def unauthorized():
return "You must be logged in to access this content.", 403
# =========================================================================
# Force SSL for all URLs across the site
@app.before_request
def before_request():
if not request.is_secure:
url = request.url.replace("http://", "https://", 1)
code = 301
return redirect(url, code=code)
# Site main entry point
@app.route("/")
def index():
return utils.redirect_admin_only()
@app.route("/admin")
def admin():
return redirect("/admin/treemgr/list")
@app.route("/admin/search")
def searchtest():
di = {}
return render_html("search2.html", di)
@app.route("/admin/es")
def es():
es = search.objects.clsSearch()
query_index = "blog"
'''
query_body = Q (
{
"multi_match": {
"query": "vedas",
"fields": ["name","title"]
}
}
)
'''
query_body={
"query":{
"match_all":{}
}
}
print("Now making first query..here is the result")
return es.search(query_index, query_body)
# =========================================================================
# API (for future)
@app.route("/api/v2/test_response")
def users():
headers = {"Content-Type": "application/json"}
    return make_response('Test worked!',
                         200,
                         headers)
if __name__ == "__main__":
app.run(ssl_context="adhoc")
| 24.235714 | 108 | 0.665782 |
f0c406a66f8d222457c0d0774669fba6941ee49e
| 1,755 |
py
|
Python
|
hpolib/util/rng_helper.py
|
NeoChaos12/HPOlib3
|
5b20fdc0ebdf133692e84aa442688839e8e69bc3
|
[
"Apache-2.0"
] | null | null | null |
hpolib/util/rng_helper.py
|
NeoChaos12/HPOlib3
|
5b20fdc0ebdf133692e84aa442688839e8e69bc3
|
[
"Apache-2.0"
] | null | null | null |
hpolib/util/rng_helper.py
|
NeoChaos12/HPOlib3
|
5b20fdc0ebdf133692e84aa442688839e8e69bc3
|
[
"Apache-2.0"
] | null | null | null |
""" Helper functions to easily obtain randomState """
from typing import Union
import numpy as np
def get_rng(rng: Union[int, np.random.RandomState, None] = None,
self_rng: Union[int, np.random.RandomState, None] = None) -> np.random.RandomState:
"""
Helper function to obtain RandomState from int or create a new one.
Sometimes a default random state (self_rng) is already available, but a
new random state is desired. In this case ``rng`` is not None and not already
a random state (int or None) -> a new random state is created.
If ``rng`` is already a randomState, it is just returned.
Same if ``rng`` is None, but the default rng is given.
Parameters
----------
rng : int, np.random.RandomState, None
self_rng : np.random.RandomState, None
Returns
-------
np.random.RandomState
"""
if rng is not None:
return _cast_int_to_random_state(rng)
elif rng is None and self_rng is not None:
return _cast_int_to_random_state(self_rng)
else:
return np.random.RandomState()
def _cast_int_to_random_state(rng: Union[int, np.random.RandomState]) -> np.random.RandomState:
"""
Helper function to cast ``rng`` from int to np.random.RandomState if necessary.
Parameters
----------
rng : int, np.random.RandomState
Returns
-------
np.random.RandomState
"""
if isinstance(rng, np.random.RandomState):
return rng
elif int(rng) == rng:
# As seed is sometimes -1 (e.g. if SMAC optimizes a deterministic function) -> use abs()
return np.random.RandomState(np.abs(rng))
else:
raise ValueError(f"{rng} is neither a number nor a RandomState. Initializing RandomState failed")
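# A small usage sketch illustrating the precedence described above: an explicit ``rng`` wins,
# otherwise the default ``self_rng`` is used, otherwise a fresh unseeded RandomState is created.
if __name__ == "__main__":
    default_rng = np.random.RandomState(7)
    assert get_rng(42).randint(100) == np.random.RandomState(42).randint(100)
    assert get_rng(rng=None, self_rng=default_rng) is default_rng
    assert isinstance(get_rng(), np.random.RandomState)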
| 31.909091 | 105 | 0.662108 |
67b96b3ee147a2697cfd9fce5da1d1d00452cd74
| 1,010 |
py
|
Python
|
009/lets_get_dotty.py
|
GeoffRiley/100DaysOfPython
|
3460dda890bea7b77542105a3f01b735a309b87c
|
[
"Unlicense"
] | 2 |
2020-01-04T23:07:57.000Z
|
2020-01-05T10:12:58.000Z
|
009/lets_get_dotty.py
|
GeoffRiley/100DaysOfPython
|
3460dda890bea7b77542105a3f01b735a309b87c
|
[
"Unlicense"
] | null | null | null |
009/lets_get_dotty.py
|
GeoffRiley/100DaysOfPython
|
3460dda890bea7b77542105a3f01b735a309b87c
|
[
"Unlicense"
] | null | null | null |
from dotted.collection import DottedCollection, DottedDict, DottedList
dlist = DottedList([0, 1, 2, 3, [4, 5, 6], 7, 8, [9, 10]])
assert dlist[0] == 0
assert dlist['1'] == 1
assert dlist['4.0'] == 4
assert dlist['4.1'] == 5
assert dlist['5'] == 7
assert dlist[5] == 7
assert dlist[7][1] == 10
assert dlist['7.1'] == 10
ddict = DottedDict({'hello': {'world': {'wide': 'web'}}})
assert ddict['hello'] == {'world': {'wide': 'web'}}
assert ddict['hello.world'] == {'wide': 'web'}
assert ddict['hello.world.wide'] == 'web'
assert ddict.hello == {'world': {'wide': 'web'}}
assert ddict.hello.world == {'wide': 'web'}
assert ddict.hello.world.wide == 'web'
dfact = DottedCollection.factory({
'hello': [{'world': {'wide': ['web', 'web1', 'web2']}}]
})
assert dfact.hello.to_json() == '[{"world": {"wide": ["web", "web1", "web2"]}}]'
assert dfact['hello.0.world'].to_json() == '{"wide": ["web", "web1", "web2"]}'
assert dfact.hello[0].world['wide.0'] == 'web'
assert dfact.hello['0.world'].wide[1] == 'web1'
| 31.5625 | 80 | 0.594059 |
0746ed35c40981895f5ad1cd760817a97d18a884
| 703 |
py
|
Python
|
tests/bbot/test_json_file_reader.py
|
NunoEdgarGFlowHub/rhizome
|
6fcb77c4cc38e662cd805fc5df7845b4c97c5ea0
|
[
"MIT"
] | 8 |
2018-10-30T10:11:33.000Z
|
2020-12-01T05:36:19.000Z
|
tests/bbot/test_json_file_reader.py
|
NunoEdgarGFlowHub/rhizome
|
6fcb77c4cc38e662cd805fc5df7845b4c97c5ea0
|
[
"MIT"
] | 16 |
2018-10-26T00:04:11.000Z
|
2021-04-30T20:59:14.000Z
|
tests/bbot/test_json_file_reader.py
|
SeedVault/bbot-py
|
b94ef5e75411ac4a214f5ac54d04ce00d9108ec0
|
[
"MIT"
] | 3 |
2019-03-11T13:42:47.000Z
|
2019-12-03T13:19:33.000Z
|
"""Unit tests for module bbot.json_file_reader"""
import os
import pytest
from bbot.json_file_reader import JsonFileReader
def test_configuration_file_not_found():
"""Configuration file not found"""
    reader = JsonFileReader({})
    reader.filename = 'it_doesnt_exist.json'
with pytest.raises(FileNotFoundError):
_ = reader.read()
def test_read_configuration_file():
"""Configuration file not found"""
reader = JsonFileReader({})
reader.filename = os.path.abspath(os.path.dirname(__file__)
+ "/test_json_file_reader.json")
settings = reader.read()
assert settings['bot']['name'] == "Example"
| 31.954545 | 70 | 0.675676 |
d61fa67c75f074aae64901513b6c741b375f002e
| 4,576 |
py
|
Python
|
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
|
upfront710/yardstick
|
2c3898f2ca061962cedbfc7435f78b59aa39b097
|
[
"Apache-2.0"
] | 28 |
2017-02-07T07:46:42.000Z
|
2021-06-30T08:11:06.000Z
|
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
|
upfront710/yardstick
|
2c3898f2ca061962cedbfc7435f78b59aa39b097
|
[
"Apache-2.0"
] | 6 |
2018-01-18T08:00:54.000Z
|
2019-04-11T04:51:41.000Z
|
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
|
upfront710/yardstick
|
2c3898f2ca061962cedbfc7435f78b59aa39b097
|
[
"Apache-2.0"
] | 46 |
2016-12-13T10:05:47.000Z
|
2021-02-18T07:33:06.000Z
|
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import mock
import unittest
from yardstick.benchmark.scenarios.availability import serviceha
from yardstick.common import exceptions as y_exc
class ServicehaTestCase(unittest.TestCase):
def setUp(self):
host = {
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
self.ctx = {"nodes": {"node1": host}}
attacker_cfg = {
"fault_type": "kill-process",
"process_name": "nova-api",
"host": "node1"
}
attacker_cfgs = []
attacker_cfgs.append(attacker_cfg)
monitor_cfg = {
"monitor_cmd": "nova image-list",
"monitor_time": 0.1
}
monitor_cfgs = []
monitor_cfgs.append(monitor_cfg)
options = {
"attackers": attacker_cfgs,
"monitors": monitor_cfgs
}
sla = {"outage_time": 5}
self.args = {"options": options, "sla": sla}
self.test__serviceha = serviceha.ServiceHA(self.args, self.ctx)
def test___init__(self):
self.assertEqual(self.test__serviceha.data, {})
self.assertFalse(self.test__serviceha.setup_done)
self.assertFalse(self.test__serviceha.sla_pass)
# NOTE(elfoley): This should be split into test_setup and test_run
# NOTE(elfoley): This should explicitly test outcomes and states
@mock.patch.object(serviceha, 'baseattacker')
@mock.patch.object(serviceha, 'basemonitor')
def test__serviceha_setup_run_successful(self, mock_monitor, *args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
self.assertTrue(p.setup_done)
mock_monitor.MonitorMgr().verify_SLA.return_value = True
ret = {}
p.run(ret)
p.teardown()
p.setup()
self.assertTrue(p.setup_done)
@mock.patch.object(serviceha, 'baseattacker')
@mock.patch.object(serviceha, 'basemonitor')
def test__serviceha_run_sla_error(self, mock_monitor, *args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
self.assertEqual(p.setup_done, True)
mock_monitor.MonitorMgr().verify_SLA.return_value = False
ret = {}
self.assertRaises(y_exc.SLAValidationError, p.run, ret)
self.assertEqual(ret['sla_pass'], 0)
@mock.patch.object(serviceha, 'baseattacker')
@mock.patch.object(serviceha, 'basemonitor')
def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
*args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
self.assertTrue(p.setup_done)
p.data["kill-process"] = 0
mock_monitor.MonitorMgr().verify_SLA.return_value = True
ret = {}
self.assertRaises(y_exc.SLAValidationError, p.run, ret)
self.assertEqual(ret['sla_pass'], 0)
@mock.patch.object(serviceha, 'baseattacker')
@mock.patch.object(serviceha, 'basemonitor')
def test__serviceha_no_teardown_when_sla_pass(self, mock_monitor,
*args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
self.assertTrue(p.setup_done)
mock_monitor.MonitorMgr().verify_SLA.return_value = True
ret = {}
p.run(ret)
attacker = mock.Mock()
attacker.mandatory = False
p.attackers = [attacker]
p.teardown()
attacker.recover.assert_not_called()
@mock.patch.object(serviceha, 'baseattacker')
@mock.patch.object(serviceha, 'basemonitor')
def test__serviceha_teardown_when_mandatory(self, mock_monitor,
*args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
self.assertTrue(p.setup_done)
mock_monitor.MonitorMgr().verify_SLA.return_value = True
ret = {}
p.run(ret)
attacker = mock.Mock()
attacker.mandatory = True
p.attackers = [attacker]
p.teardown()
attacker.recover.assert_called_once()
| 34.666667 | 78 | 0.597028 |
7c8220f04e9c3b543a1041bc58d39d50ef20a901
| 791 |
py
|
Python
|
main.py
|
Apogeum12/MarkovGenerator
|
a51659ac177bd33ec98ba818fd01e55bece94921
|
[
"MIT"
] | null | null | null |
main.py
|
Apogeum12/MarkovGenerator
|
a51659ac177bd33ec98ba818fd01e55bece94921
|
[
"MIT"
] | null | null | null |
main.py
|
Apogeum12/MarkovGenerator
|
a51659ac177bd33ec98ba818fd01e55bece94921
|
[
"MIT"
] | null | null | null |
from Markov import MarkovChain
import pandas as pd
# =============================================================================
# ======================== FUN WITH MARKOV CHAIN ==============================
# ====================== THIS IS VERY SIMPLE DEMO =============================
# =============================================================================
def main():
n = 125
num_sentences = 4
file_name1 = 'dane_disco.txt'
markov = MarkovChain(file_name1)
sentence = markov.generator_sentence(n, num_sentences)
# Save in file for Preprocesing
df1 = pd.DataFrame(columns=['Text'])
df1.Text = sentence
df1.to_csv('Skills/generate.tsv', index=False, sep='\t')
print("Save to file ....")
if __name__ == "__main__":
main()
| 32.958333 | 79 | 0.434893 |
ce16fd73f2c7fd095e1289cb719653e986388e11
| 406 |
py
|
Python
|
data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/unlexicalize_pos.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/unlexicalize_pos.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/unlexicalize_pos.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1 |
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
#!/usr/bin/env python
'''
Unlexicalize POS-tagged sentences to train a POS ngram model.
'''
import sys
from util import tokenize_words, pos_tag
PROGRESS = 1000000
if __name__ == "__main__":
for i, line in enumerate(sys.stdin):
words = tokenize_words(line)
pos = map(pos_tag, words)
print ' '.join(pos)
if i % PROGRESS == 0:
print >>sys.stderr, i
| 21.368421 | 61 | 0.615764 |
7f68d562e8b9d8c59d75fc417d4f3b7163c86c3c
| 808 |
py
|
Python
|
custom_components/ha_cloud_music/api_view.py
|
CrossStone/ha_cloud_music
|
c20c4336319913e33f9a3681fa30647ef3378820
|
[
"MIT"
] | null | null | null |
custom_components/ha_cloud_music/api_view.py
|
CrossStone/ha_cloud_music
|
c20c4336319913e33f9a3681fa30647ef3378820
|
[
"MIT"
] | null | null | null |
custom_components/ha_cloud_music/api_view.py
|
CrossStone/ha_cloud_music
|
c20c4336319913e33f9a3681fa30647ef3378820
|
[
"MIT"
] | null | null | null |
from homeassistant.components.http import HomeAssistantView
from .api_const import DOMAIN_API,DOMAIN
##### Gateway control
class ApiView(HomeAssistantView):
url = DOMAIN_API
name = DOMAIN
requires_auth = True
async def post(self, request):
response = await request.json()
hass = request.app["hass"]
if 'type' in response:
_type = response['type']
if _type == 'web':
mp = hass.data[DOMAIN]
_result = await mp.api_music.get(response['url'])
return self.json(_result)
elif _type == 'proxy':
mp = hass.data[DOMAIN]
_result = await mp.api_music.proxy_get(response['url'])
return self.json(_result)
return self.json(response)
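# Illustrative sketch only (payload values are assumptions, not part of the original
# component): the POST body handled above is expected to carry a 'type' of 'web' or
# 'proxy' plus the 'url' to fetch, for example:
_EXAMPLE_WEB_PAYLOAD = {"type": "web", "url": "https://music.example.com/api/playlist"}
_EXAMPLE_PROXY_PAYLOAD = {"type": "proxy", "url": "https://music.example.com/song.mp3"}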
| 32.32 | 71 | 0.569307 |
4bf152a78f12e7ae2f3f065a3b86204d4a3b95d4
| 9,900 |
py
|
Python
|
gsw/test/test_octave.py
|
ocefpaf/python-gsw
|
884fb02c7d7e0b6273e7abd24ef5951632a2653d
|
[
"MIT"
] | 35 |
2015-02-21T02:25:03.000Z
|
2021-11-04T05:06:14.000Z
|
gsw/test/test_octave.py
|
ocefpaf/python-gsw
|
884fb02c7d7e0b6273e7abd24ef5951632a2653d
|
[
"MIT"
] | 28 |
2015-01-05T16:09:35.000Z
|
2021-09-23T17:12:05.000Z
|
gsw/test/test_octave.py
|
ocefpaf/python-gsw
|
884fb02c7d7e0b6273e7abd24ef5951632a2653d
|
[
"MIT"
] | 20 |
2015-02-21T01:37:33.000Z
|
2021-11-19T20:26:51.000Z
|
# -*- coding: utf-8 -*-
"""
Script to test by comparing output from python with that from octave.
Usage (run from this test directory):
python test_octave.py [gsw_matlab_dir]
At present, this assumes by default that the matlab code resides
in a "gsw_matlab_v3_04" directory which has been symlinked into
the test directory. Alternatively, you may specify the directory
as an optional argument.
For functions that return more than one variable, only the
first is tested.
"""
#
# test_octave.py
#
# purpose: Quick "compare test" with octave results.
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.tiddlyspot.com/
# created: 13-Jun-2013
# modified: Tue 02 Jul 2013 07:45:17 PM BRT
#
# obs: This is different from `test_check_values.py`, that tests
# against the results in the docs, `test_octave.py` uses same input values to
# run both python and Matlab versions (using Octave).
#
# This is not a thorough test, just an "ad hoc" test to compare when a new
# Matlab version is released.
#
from __future__ import print_function
import os
import sys
from collections import OrderedDict
import numpy as np
from oct2py import Oct2Py
from oct2py import Oct2PyError
import gsw
def get_octave(path):
"""
Return an Oct2Py instance with a suitable path.
It can be useful to have this as a function because
an exception in oct2py kills the module-level instance.
"""
engine = Oct2Py(oned_as='column') # 'column' makes interp_SA_CT work
_ = engine.addpath(engine.genpath(path))
_ = engine.addpath('./')
return engine
def compare_results(name, function, args, octave):
args = [values.get(arg) for arg in args]
print(name)
try: # Python.
res = function(*args)
except:
print('%s: python runtime error' % name)
        # Return the 'no_python' outcome so the summary below can report it,
        # rather than re-raising and aborting the whole comparison run.
        return 'no_python'
if isinstance(res, tuple):
res = res[0]
try: # Octave.
ocmd = octave.__getattr__('gsw_%s' % (name,))
val = ocmd(*args)
except Exception as err: #Oct2PyError as err:
print('%s: Octave runtime error; %s' % (name, err))
print("python:\n%s" % res)
return 'no_octave'
if isinstance(val, tuple):
val = val[0]
try:
val = val.flatten()
perfect = (val == res).all()
except:
print('%s: Comparison failed' % name)
print("octave:\n%s" % val)
print("python:\n%s" % res)
return 'no_comparison'
if (np.allclose(val, res, rtol=1e-15, atol=0) or
(np.all(np.isnan(val)) and np.all(np.isnan(res)))):
print('%s: Passed' % name)
return 'passed'
else:
print('%s: Failed' % name)
print("octave:\n%s" % val)
print("python:\n%s" % res)
print("python - octave:\n%s" % (res -val))
return 'failed'
print('')
#############################################################################
values = dict(C=np.array([34.5487, 34.7275, 34.8605, 34.6810, 34.568, 34.56]),
t=np.array([28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]),
p=np.array([10., 50., 125., 250., 600., 1000.]),
SP=np.array([34.5487, 34.7275, 34.8605, 34.6810, 34.5680,
34.5600]),
SA=np.array([34.7118, 34.8915, 35.0256, 34.8472, 34.7366,
34.7324]),
CT=np.array([28.7856, 28.4329, 22.8103, 10.2600, 6.8863,
4.4036]),
rho=np.array([1021.8484, 1022.2647, 1024.4207, 1027.7841,
1029.8287, 1031.9916]),
ps=0, pt=0, pp=0,
lat=np.array([4., 4., 4., 4., 4., 4.]),
lon=np.array([188., 188., 188., 188., 188., 188.]),
pt0=np.array([28.8099, 28.4392, 22.7862, 10.2262, 6.8272,
4.3236]),
spycnl=np.array([21.8482, 22.2647, 24.4207, 27.7841, 29.8287,
31.9916]),
A='gn', # or s2 for sigma2; but then spycnl would need to change
# Also, the matlab code is incorrect for the s2 case.
p_i=np.array([500.0, 600.0, 700.0]),
# Baltic.
SAb=np.array([6.6699, 6.7738, 6.9130, 7.3661, 7.5862, 10.3895]),
SPb=np.array([6.5683, 6.6719, 6.8108, 7.2629, 7.4825, 10.2796]),
latb=np.array([59., 59., 59., 59., 59., 59.]),
lonb=np.array([20., 20., 20., 20., 20., 20.])
)
# Functions.
library = OrderedDict({
# library.py
'deltaSA_atlas': (gsw.library.deltaSA_atlas, ('p', 'lon', 'lat')),
'enthalpy_SSO_0_p': (gsw.library.enthalpy_SSO_0_p, ('p')),
'entropy_part': (gsw.library.entropy_part, ('SA', 't', 'p')),
'entropy_part_zerop': (gsw.library.entropy_part_zerop, ('SA', 'pt0')),
'Fdelta': (gsw.library.Fdelta, ('p', 'lon', 'lat')),
'gibbs': (gsw.library.gibbs, ('ps', 'pt', 'pp', 'SA', 't', 'p')),
'gibbs_pt0_pt0': (gsw.library.gibbs_pt0_pt0, ('SA', 'pt0')),
'Hill_ratio_at_SP2': (gsw.library.Hill_ratio_at_SP2, ('t')),
'infunnel': (gsw.library.infunnel, ('SA', 'CT', 'p')),
'interp_ref_cast': (gsw.library.interp_ref_cast, ('spycnl', 'A')),
'interp_SA_CT': (gsw.library.interp_SA_CT, ('SA', 'CT', 'p', 'p_i')),
'SAAR': (gsw.library.SAAR, ('p', 'lon', 'lat')),
'SA_from_SP_Baltic': (gsw.library.SA_from_SP_Baltic, ('SPb', 'lonb',
'latb')),
'specvol_SSO_0_p': (gsw.library.specvol_SSO_0_p, ('p')),
'SP_from_SA_Baltic': (gsw.library.SP_from_SA_Baltic, ('SAb', 'lonb',
'latb')),
# thermodynamics_from_t.py
#'adiabatic_lapse_rate_from_t': (gsw.adiabatic_lapse_rate_from_t,
#('SA', 't', 'p')),
'alpha_wrt_CT_t_exact': (gsw.alpha_wrt_CT_t_exact, ('SA', 't', 'p')),
'alpha_wrt_pt_t_exact': (gsw.alpha_wrt_pt_t_exact, ('SA', 't', 'p')),
'alpha_wrt_t_exact': (gsw.alpha_wrt_t_exact, ('SA', 't', 'p')),
'beta_const_CT_t_exact': (gsw.beta_const_CT_t_exact, ('SA', 't', 'p')),
'beta_const_pt_t_exact': (gsw.beta_const_pt_t_exact, ('SA', 't', 'p')),
'beta_const_t_exact': (gsw.beta_const_t_exact, ('SA', 't', 'p')),
'chem_potential_relative_t_exact': (gsw.chem_potential_relative_t_exact,
('SA', 't', 'p')),
'chem_potential_salt_t_exact': (gsw.chem_potential_salt_t_exact,
('SA', 't', 'p')),
'chem_potential_water_t_exact': (gsw.chem_potential_water_t_exact,
('SA', 't', 'p')),
'cp_t_exact': (gsw.cp_t_exact, ('SA', 't', 'p')),
#'deltaSA_from_rho_t_exact': (gsw.deltaSA_from_rho_t_exact,
#('rho', 'SP', 't', 'p')),
'dynamic_enthalpy_t_exact': (gsw.dynamic_enthalpy_t_exact,
('SA', 't', 'p')),
'enthalpy_t_exact': (gsw.enthalpy_t_exact, ('SA', 't', 'p')),
'entropy_t_exact': (gsw.entropy_t_exact, ('SA', 't', 'p')),
'Helmholtz_energy_t_exact': (gsw.Helmholtz_energy_t_exact,
('SA', 't', 'p')),
'internal_energy_t_exact': (gsw.internal_energy_t_exact, ('SA', 't', 'p')),
'isochoric_heat_cap_t_exact': (gsw.isochoric_heat_cap_t_exact,
('SA', 't', 'p')),
'kappa_const_t_exact': (gsw.kappa_const_t_exact, ('SA', 't', 'p')),
'kappa_t_exact': (gsw.kappa_t_exact, ('SA', 't', 'p')),
'osmotic_coefficient_t_exact': (gsw.osmotic_coefficient_t_exact,
('SA', 't', 'p')),
'osmotic_pressure_t_exact': (gsw.osmotic_pressure_t_exact,
('SA', 't', 'pw')),
'pot_rho_t_exact': (gsw.pot_rho_t_exact, ('SA', 't', 'p', 'p_ref')),
'rho_t_exact': (gsw.rho_t_exact, ('SA', 't', 'p')),
'SA_from_rho_t_exact': (gsw.SA_from_rho_t_exact, ('rho', 't', 'p')),
#'SA_from_rho_t': (gsw.SA_from_rho_t, ('rho', 't', 'p')),
'sigma0_pt0_exact': (gsw.sigma0_pt0_exact, ('SA', 'pt0')),
'sound_speed_t_exact': (gsw.sound_speed_t_exact, ('SA', 't', 'p')),
'specvol_anom_t_exact': (gsw.specvol_anom_t_exact, ('SA', 't', 'p')),
'specvol_t_exact': (gsw.specvol_t_exact, ('SA', 't', 'p')),
't_from_rho_exact': (gsw.t_from_rho_exact, ('rho', 'SA', 'p')),
't_maxdensity_exact': (gsw.t_maxdensity_exact, ('SA', 'p')),
# absolute_salinity_sstar_ct.py
'SA_from_SP': (gsw.SA_from_SP, ('SP', 'p', 'lon', 'lat'))})
if __name__ == '__main__':
try:
path = sys.argv[1]
except IndexError:
path = "gsw_matlab_v3_04"
if not os.path.exists(path):
raise ValueError("matlab gsw path %s not found" % path)
octave = get_octave(path)
#print('\n'.join(octave.path().split(':')))
# We have to supply a fake superiorfloat function for octave.
# We are writing it in the local directory, which is not a nice
# thing to do; maybe this can be improved later.
_sfloat = """function out = superiorfloat(varargin)
out = 'double';
"""
open('superiorfloat.m', 'wt').write(_sfloat)
outcomes = ['passed', 'no_octave', 'no_python', 'failed', 'no_comparison']
results = dict([(k, list()) for k in outcomes])
for name, (function, args) in library.items():
ret = compare_results(name, function, args, octave)
results[ret].append(name)
#os.remove('superiorfloat.m')
print('\nSummary:')
print('passed:\n %s' % '\n '.join(results['passed']))
print('octave call failed:\n %s' % '\n '.join(results['no_octave']))
print('python call failed:\n %s' % '\n '.join(results['no_python']))
print('results did not match:\n %s' % '\n '.join(results['failed']))
print('comparison failed:\n %s' % '\n '.join(results['no_comparison']))
print('')
| 40.243902 | 79 | 0.564646 |
96a5c0b638f5ac6c8499c3d62c643f37eddcdbb1
| 6,161 |
py
|
Python
|
Quick Terminal-Interface Database/store_download.py
|
Ddottsai/Code-Storage
|
fe8753e3d93dfa69822ae06b64cc7d3b259a4434
|
[
"MIT"
] | null | null | null |
Quick Terminal-Interface Database/store_download.py
|
Ddottsai/Code-Storage
|
fe8753e3d93dfa69822ae06b64cc7d3b259a4434
|
[
"MIT"
] | null | null | null |
Quick Terminal-Interface Database/store_download.py
|
Ddottsai/Code-Storage
|
fe8753e3d93dfa69822ae06b64cc7d3b259a4434
|
[
"MIT"
] | null | null | null |
from resource_database import resource_database
import sys
import textwrap
import re
from prompt_toolkit import PromptSession
def get_input(query,return_lowercase=True):
while True:
init_user_input = ps.prompt(query)
user_input = init_user_input.lower()
lst_input = re.split("[, ]+",user_input)
if lst_input[0] == "show":
print()
attr = lst_input[1] if len(lst_input) > 1 else re.split("[, ]+",ps.prompt("Attribute to show: "))[0]
if attr in ["tag","tags"]:
cats = ""
while True:
cats = lst_input[2] if len(lst_input) > 2 else ps.prompt("Categories to search for tags (type 'all' to include all tags): ")
if cats == "show":
resource_database.show(["cats"])
else:
break
resource_database.show(["tags",re.split("[, ]+", cats)])
elif attr in ["alias","aliases"]:
resource_database.show(["aliases"])
elif attr in ["cat","cats"]:
resource_database.show(["cats"])
elif attr in ["fam","fams","family","families"]:
resource_database.show(["families"])
elif attr in ["option","options","help"]:
print("Options are: tag, alias, cat, fam")
else:
print("'"+attr+"' is not a valid field. choose from tag, alias, cat, and fam.")
"""
if lst_input[1] == "key":
query = ["keys",re.split("[, ]+",input(
"Categories to search for keys (type 'all' to include all keys): "))]
resource_database.show(query)
"""
print()
else:
if return_lowercase:
return user_input
else:
return init_user_input
wrapper = textwrap.TextWrapper(initial_indent=" ")
temp_input = ""
dct = {"categories":[],"tags":[],"keys":[],"family":None,"summary":None}
fields = [i for i in dct.keys()]
index = 0
import glob,os
ps = PromptSession()
old_path = max(glob.iglob(os.path.expanduser('~/Downloads/*')), key=lambda a:os.stat(a).st_birthtime)
try:
while True:
print("\n\n")
print(old_path+"\n")
for f in fields:
if dct[f] is not None:
if isinstance(dct[f],list):
print(f + ": " + resource_database.get_contents(dct[f]))
else:
if f == "ref" and len(dct[f]) > 100:
print(f + ": " + dct[f][:100])
else:
print(f + ": " + dct[f])
else:
print(f + ":")
if index >= len(dct):
if len(dct["categories"]) == 0:
print("Please specify at least one valid category.")
else:
confirmation = get_input("\nAre these specs okay? ")
if confirmation == "yes":
break
index = 0
continue
if fields[index] != "summary":
temp = get_input("\nInput for "+fields[index].upper()+": ",return_lowercase = True)
else:
temp = get_input("\nInput for "+fields[index].upper()+": ",return_lowercase = False)
if fields[index] in ["keys","tags"]:
temp = re.split("[,]+",temp)
elif fields[index] == "categories":
temp = re.split("[, ]+",temp)
if isinstance(dct[fields[index]],list):
if len(temp) == 0:
first = []
else:
for i,d in enumerate(temp):
if len(d) > 0 and d[0] == " ":
temp[i] = d[1:]
i = temp[0].find(" ")
if i != -1 and temp[0][:i] in ["replace","set","add","append"]:
first = temp[0][:i]
a = [temp[0][:i],temp[0][i+1:]]
a.extend(temp[1:])
temp = a
else:
first = temp[0]
else:
if len(temp) > 0 and temp[0] == " ":
temp = temp[1:]
if temp.find(" ") != -1:
first = temp[:temp.find(" ")]
else:
first = temp
if len(temp) == 0 or first in ["skip","pass",""]:
pass
elif first in ["back"]:
index -= 1
continue
elif first in ["delete","clear"]:
if isinstance(dct[fields[index]],list):
dct[fields[index]] = []
else:
dct[fields[index]] = None
continue
elif first in ["replace","set"]:
if len(temp) > 1:
dct[fields[index]] = temp[1:]
elif first in ["add","append"]:
del temp[0]
if not isinstance(dct[fields[index]], list):
print("Cannot add more than one value to this field.")
continue
else:
if len(temp) != 0:
if isinstance(dct[fields[index]], list):
dct[fields[index]].extend(temp)
else:
dct[fields[index]] = temp
wrong_cats = []
i = 0
while i < len(dct["categories"]):
if not resource_database.open_cat(dct["categories"][i]):
wrong_cats.append(dct["categories"][i])
del dct["categories"][i]
else:
i += 1
index+=1
metadata = {"ref":"download","cats":dct["categories"],"tags":dct["tags"],
"family":dct["family"],"keys":dct["keys"],"ref_type":"file",
"summary":dct["summary"]}
for k,v in metadata.items():
if v == []:
metadata[k] = None
resource_database.add_ref(ref=metadata["ref"],keys=metadata["keys"],
cats=metadata["cats"],tags=metadata["tags"],family=metadata["family"],
summary=metadata["summary"],ref_type=metadata["ref_type"])
except Exception as e:
print(e)
finally:
print("Exiting program.")
resource_database.end()
| 39.242038 | 144 | 0.470378 |
7da77dbb700e2fed3da9e24f96ca6f0fc070908b
| 2,141 |
py
|
Python
|
JumpscalePortalClassic/portal/Validators.py
|
threefoldtech/jumpscale_portal_classic
|
d14fe4a17c0486df7a87d149e900746654091fda
|
[
"Apache-2.0"
] | null | null | null |
JumpscalePortalClassic/portal/Validators.py
|
threefoldtech/jumpscale_portal_classic
|
d14fe4a17c0486df7a87d149e900746654091fda
|
[
"Apache-2.0"
] | null | null | null |
JumpscalePortalClassic/portal/Validators.py
|
threefoldtech/jumpscale_portal_classic
|
d14fe4a17c0486df7a87d149e900746654091fda
|
[
"Apache-2.0"
] | null | null | null |
from JumpscalePortalClassic.portal import exceptions
import re
INT = r"""(?:[+-]?(?:[0-9]+))"""
BASE10NUM = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
NUMBER = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
BASE16NUM = r"""(?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))"""
BASE16FLOAT = r"""\b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?)|(?:\.[0-9A-Fa-f]+)))\b"""
POSINT = r"""\b(?:[1-9][0-9]*)\b"""
NONNEGINT = r"""\b(?:[0-9]+)\b"""
WORD = r"""\b\w+\b"""
NOTSPACE = r"""\S+"""
SPACE = r"""\s*"""
DATA = r""".*?"""
GREEDYDATA = r""".*"""
QUOTEDSTRING = r"""(?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))"""
UUID = r"""[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}"""
def NAME(val):
for i in r"""<>"'""":
if i in val:
raise exceptions.BadRequest('The name you entered contains invalid characters')
if len(val) < 2:
raise exceptions.BadRequest('The name cannot be shorter than two characters')
return True
def IP(val):
return sum([x.isdigit() and 0 <= int(x) <= 255 for x in val.split('.')]) == 4
def PASSWORD(val):
return 8 <= len(val) <= 60
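# Illustrative sketch only (sample values are made up, not from the original module):
# quick sanity checks for the validators above.
def _example_validator_checks():
    assert NAME("valid name")        # no <>"' characters and at least two characters
    assert IP("10.0.0.1")            # four dot-separated octets, each 0-255
    assert not IP("10.0.0.256")      # 256 is out of range, so this address is rejected
    assert PASSWORD("s3cretpass")    # length between 8 and 60 characters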
def USERNAME(val):
m = re.match("[a-zA-Z0-9._-]+(?:@[a-zA-Z0-9._-]+)?", val)
if 2 < len(val.split('@')[0]) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Usernames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def GROUPNAME(val):
m = re.match("[a-zA-Z0-9._-]+", val)
if 2 < len(val) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Groupnames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def EMAIL(val):
atpos = val.find('@')
dotpos = val.find('.')
if atpos == -1 or dotpos == -1:
raise exceptions.BadRequest('Invalid Email Address given')
elif dotpos < atpos:
raise exceptions.BadRequest('Invalid Email Address given')
| 35.098361 | 159 | 0.514713 |
c1a36b50afea149ef1efe1a8e1a9e3a6b36877c6
| 1,484 |
py
|
Python
|
util/.ipynb_checkpoints/fcst_utils-checkpoint.py
|
aws-samples/amazon-sagemaker-studio-lab-cold-start-forecasting-using-autogluon
|
5a690284c6a99f110c7e649d6b7734305e2d2d10
|
[
"MIT-0"
] | 1 |
2022-03-07T04:29:25.000Z
|
2022-03-07T04:29:25.000Z
|
util/fcst_utils.py
|
whosivan/amazon-sagemaker-studio-lab-cold-start-forecasting-using-autogluon
|
d113946f83f9c3030c86b24ba0ffaf36a84dee5e
|
[
"MIT-0"
] | null | null | null |
util/fcst_utils.py
|
whosivan/amazon-sagemaker-studio-lab-cold-start-forecasting-using-autogluon
|
d113946f83f9c3030c86b24ba0ffaf36a84dee5e
|
[
"MIT-0"
] | null | null | null |
import time
import json
import gzip
import os
import shutil
import boto3
import botocore.exceptions
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.auto import trange
def plot_forecasts(fcsts, exact, freq = '1H', forecastHorizon=24, time_back = 80):
p10 = pd.DataFrame(fcsts['Forecast']['Predictions']['p10'])
p50 = pd.DataFrame(fcsts['Forecast']['Predictions']['p50'])
p90 = pd.DataFrame(fcsts['Forecast']['Predictions']['p90'])
pred_int = p50['Timestamp'].apply(lambda x: pd.Timestamp(x))
fcst_start_date = pred_int.iloc[0]
fcst_end_date = pred_int.iloc[-1]
time_int = exact['timestamp'].apply(lambda x: pd.Timestamp(x))
plt.plot(time_int[-time_back:],exact['target'].values[-time_back:], color = 'r')
plt.plot(pred_int, p50['Value'].values, color = 'k')
plt.fill_between(pred_int,
p10['Value'].values,
p90['Value'].values,
color='b', alpha=0.3);
plt.axvline(x=pd.Timestamp(fcst_start_date), linewidth=3, color='g', ls='dashed')
plt.axvline(x=pd.Timestamp(fcst_end_date), linewidth=3, color='g', ls='dashed')
plt.xticks(rotation=30)
plt.legend(['Target', 'Forecast'], loc = 'lower left')
def extract_gz( src, dst ):
print( f"Extracting {src} to {dst}" )
with open(dst, 'wb') as fd_dst:
with gzip.GzipFile( src, 'rb') as fd_src:
data = fd_src.read()
fd_dst.write(data)
print("Done.")
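# Illustrative usage sketch (file names and response shape are assumptions, not part
# of this module): plot_forecasts expects `fcsts` shaped like an Amazon Forecast query
# response, {'Forecast': {'Predictions': {'p10': [...], 'p50': [...], 'p90': [...]}}},
# where each list holds {'Timestamp': ..., 'Value': ...} records, and `exact` to be a
# DataFrame with 'timestamp' and 'target' columns, e.g.:
#
#     extract_gz("data/item-demand-time.csv.gz", "data/item-demand-time.csv")
#     plot_forecasts(forecast_response, actuals_df, forecastHorizon=24, time_back=80)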
| 32.977778 | 85 | 0.633423 |
afa77e15b73b0232009092f5db88d0b670e91e38
| 5,925 |
py
|
Python
|
build/PureCloudPlatformClientV2/models/campaign_rule_action_entities.py
|
cjohnson-ctl/platform-client-sdk-python
|
38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100
|
[
"MIT"
] | 10 |
2019-02-22T00:27:08.000Z
|
2021-09-12T23:23:44.000Z
|
libs/PureCloudPlatformClientV2/models/campaign_rule_action_entities.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 5 |
2018-06-07T08:32:00.000Z
|
2021-07-28T17:37:26.000Z
|
libs/PureCloudPlatformClientV2/models/campaign_rule_action_entities.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 6 |
2020-04-09T17:43:07.000Z
|
2022-02-17T08:48:05.000Z
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class CampaignRuleActionEntities(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
CampaignRuleActionEntities - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'campaigns': 'list[DomainEntityRef]',
'sequences': 'list[DomainEntityRef]',
'use_triggering_entity': 'bool'
}
self.attribute_map = {
'campaigns': 'campaigns',
'sequences': 'sequences',
'use_triggering_entity': 'useTriggeringEntity'
}
self._campaigns = None
self._sequences = None
self._use_triggering_entity = None
@property
def campaigns(self):
"""
Gets the campaigns of this CampaignRuleActionEntities.
The list of campaigns for a CampaignRule to monitor. Required if the CampaignRule has any conditions that run on a campaign.
:return: The campaigns of this CampaignRuleActionEntities.
:rtype: list[DomainEntityRef]
"""
return self._campaigns
@campaigns.setter
def campaigns(self, campaigns):
"""
Sets the campaigns of this CampaignRuleActionEntities.
The list of campaigns for a CampaignRule to monitor. Required if the CampaignRule has any conditions that run on a campaign.
:param campaigns: The campaigns of this CampaignRuleActionEntities.
:type: list[DomainEntityRef]
"""
self._campaigns = campaigns
@property
def sequences(self):
"""
Gets the sequences of this CampaignRuleActionEntities.
The list of sequences for a CampaignRule to monitor. Required if the CampaignRule has any conditions that run on a sequence.
:return: The sequences of this CampaignRuleActionEntities.
:rtype: list[DomainEntityRef]
"""
return self._sequences
@sequences.setter
def sequences(self, sequences):
"""
Sets the sequences of this CampaignRuleActionEntities.
The list of sequences for a CampaignRule to monitor. Required if the CampaignRule has any conditions that run on a sequence.
:param sequences: The sequences of this CampaignRuleActionEntities.
:type: list[DomainEntityRef]
"""
self._sequences = sequences
@property
def use_triggering_entity(self):
"""
Gets the use_triggering_entity of this CampaignRuleActionEntities.
If true, the CampaignRuleAction will apply to the same entity that triggered the CampaignRuleCondition.
:return: The use_triggering_entity of this CampaignRuleActionEntities.
:rtype: bool
"""
return self._use_triggering_entity
@use_triggering_entity.setter
def use_triggering_entity(self, use_triggering_entity):
"""
Sets the use_triggering_entity of this CampaignRuleActionEntities.
If true, the CampaignRuleAction will apply to the same entity that triggered the CampaignRuleCondition.
:param use_triggering_entity: The use_triggering_entity of this CampaignRuleActionEntities.
:type: bool
"""
self._use_triggering_entity = use_triggering_entity
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 32.377049 | 132 | 0.62346 |
3516caacdebb206dac8aba918a9f507876024e6d
| 918 |
py
|
Python
|
handlers/RepositoryHandler.py
|
zd08135/qihoo360-problem
|
918fa984df91eff0738dafaca091766bf1ac353d
|
[
"MIT"
] | null | null | null |
handlers/RepositoryHandler.py
|
zd08135/qihoo360-problem
|
918fa984df91eff0738dafaca091766bf1ac353d
|
[
"MIT"
] | null | null | null |
handlers/RepositoryHandler.py
|
zd08135/qihoo360-problem
|
918fa984df91eff0738dafaca091766bf1ac353d
|
[
"MIT"
] | null | null | null |
from handlers.BaseHandler import BaseHandler
from handlers.FeedTypes import FeedType
class RepositoryHandler(BaseHandler):
def initialize(self, handler):
BaseHandler.initialize(self, handler)
def create_repository(self, data):
uid = data["uid"]
name = data["name"]
rid = self.handler.repository_model.create_repository(uid, name)
self.handler.feed_model.add_feed(uid, FeedType.CREATE_REPOSITORY, {"rid": rid})
return self.success_responce({'rid': rid})
def create_fork(self, data):
uid = data["uid"]
repository_id = data["rid"]
new_rid = self.handler.repository_model.create_fork(uid, repository_id)
if not new_rid:
return self.failure_responce(400, "fork failed")
self.handler.feed_model.add_feed(uid, FeedType.CREATE_FORK, {"rid": new_rid})
return self.success_responce({'rid': new_rid})
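# Illustrative sketch (field values are made up): create_repository above expects a
# payload like {"uid": "<user id>", "name": "<repository name>"}, while create_fork
# expects {"uid": "<user id>", "rid": "<repository id to fork>"}; both answer with a
# success response of the form {"rid": <id of the new repository>}.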
| 32.785714 | 87 | 0.679739 |
3ef9b2f1b407560815f6760a7bdbdfb78a5b48a8
| 292 |
py
|
Python
|
config.py
|
louissobel/Drapache
|
564aaba08ee6929043ccd68027c6b01920dbb40a
|
[
"MIT"
] | 9 |
2015-03-20T05:48:37.000Z
|
2018-12-17T09:32:31.000Z
|
config.py
|
louissobel/Drapache
|
564aaba08ee6929043ccd68027c6b01920dbb40a
|
[
"MIT"
] | null | null | null |
config.py
|
louissobel/Drapache
|
564aaba08ee6929043ccd68027c6b01920dbb40a
|
[
"MIT"
] | 2 |
2018-01-28T11:23:58.000Z
|
2018-07-30T23:38:51.000Z
|
"""
Provides configuration for the drapache server
"""
################### flat file
# subdomain file path
SUBDOMAIN_FILE = ""
################## other stuff
DEFAULT_PORT = 5501 #just because
################## dropbox config
# api app-key
APP_KEY = ''
# api app-secret
APP_SECRET = ''
| 14.6 | 46 | 0.575342 |
be33ca5c6065c0e9a9a0c0e6a92369b40efa8b4a
| 50,811 |
py
|
Python
|
aries_cloudagent/protocols/present_proof/dif/pres_exch_handler.py
|
zanost/aries-cloudagent-python
|
9541edfb957742e9db8082981c8397b45f8de987
|
[
"Apache-2.0"
] | 247 |
2019-07-02T21:10:21.000Z
|
2022-03-30T13:55:33.000Z
|
aries_cloudagent/protocols/present_proof/dif/pres_exch_handler.py
|
zanost/aries-cloudagent-python
|
9541edfb957742e9db8082981c8397b45f8de987
|
[
"Apache-2.0"
] | 1,462 |
2019-07-02T20:57:30.000Z
|
2022-03-31T23:13:35.000Z
|
aries_cloudagent/protocols/present_proof/dif/pres_exch_handler.py
|
zanost/aries-cloudagent-python
|
9541edfb957742e9db8082981c8397b45f8de987
|
[
"Apache-2.0"
] | 377 |
2019-06-20T21:01:31.000Z
|
2022-03-30T08:27:53.000Z
|
"""
Utilities for dif presentation exchange attachment.
General Flow:
create_vp ->
make_requirement [create a Requirement from SubmissionRequirements and Descriptors] ->
apply_requirement [filter credentials] ->
merge [return applicable credential list and descriptor_map for presentation_submission]
returns VerifiablePresentation
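
Illustrative sketch of this flow (the wiring shown here is an assumption for
documentation purposes, not part of the original module):

    handler = DIFPresExchHandler(profile, proof_type=Ed25519Signature2018.signature_type)
    vp = await handler.create_vp(
        credentials=vc_records,         # Sequence[VCRecord] from the wallet VC holder
        pd=presentation_definition,     # PresentationDefinition from the proof request
        challenge=challenge,
        domain=domain,
    )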
"""
import pytz
import re
from datetime import datetime
from dateutil.parser import parse as dateutil_parser
from dateutil.parser import ParserError
from jsonpath_ng import parse
from pyld import jsonld
from pyld.jsonld import JsonLdProcessor
from typing import Sequence, Optional, Tuple
from unflatten import unflatten
from uuid import uuid4
from ....core.error import BaseError
from ....core.profile import Profile
from ....did.did_key import DIDKey
from ....storage.vc_holder.vc_record import VCRecord
from ....vc.ld_proofs import (
Ed25519Signature2018,
BbsBlsSignature2020,
BbsBlsSignatureProof2020,
WalletKeyPair,
DocumentLoader,
)
from ....vc.ld_proofs.constants import (
SECURITY_CONTEXT_BBS_URL,
EXPANDED_TYPE_CREDENTIALS_CONTEXT_V1_VC_TYPE,
)
from ....vc.vc_ld.prove import sign_presentation, create_presentation, derive_credential
from ....wallet.base import BaseWallet, DIDInfo
from ....wallet.error import WalletError, WalletNotFoundError
from ....wallet.key_type import KeyType
from .pres_exch import (
PresentationDefinition,
InputDescriptors,
DIFField,
Filter,
Constraints,
SubmissionRequirements,
Requirement,
SchemaInputDescriptor,
InputDescriptorMapping,
PresentationSubmission,
)
PRESENTATION_SUBMISSION_JSONLD_CONTEXT = (
"https://identity.foundation/presentation-exchange/submission/v1"
)
PRESENTATION_SUBMISSION_JSONLD_TYPE = "PresentationSubmission"
class DIFPresExchError(BaseError):
"""Base class for DIF Presentation Exchange related errors."""
class DIFPresExchHandler:
"""Base Presentation Exchange Handler."""
ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING = {
Ed25519Signature2018: KeyType.ED25519,
}
if BbsBlsSignature2020.BBS_SUPPORTED:
ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[BbsBlsSignature2020] = KeyType.BLS12381G2
DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING = {
BbsBlsSignatureProof2020: KeyType.BLS12381G2,
}
PROOF_TYPE_SIGNATURE_SUITE_MAPPING = {
suite.signature_type: suite
for suite, key_type in ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
DERIVED_PROOF_TYPE_SIGNATURE_SUITE_MAPPING = {
suite.signature_type: suite
for suite, key_type in DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
def __init__(
self,
profile: Profile,
pres_signing_did: str = None,
proof_type: str = None,
):
"""Initialize PresExchange Handler."""
super().__init__()
self.profile = profile
self.pres_signing_did = pres_signing_did
if not proof_type:
self.proof_type = Ed25519Signature2018.signature_type
else:
self.proof_type = proof_type
self.is_holder = False
async def _get_issue_suite(
self,
*,
wallet: BaseWallet,
issuer_id: str,
):
"""Get signature suite for signing presentation."""
did_info = await self._did_info_for_did(issuer_id)
verification_method = self._get_verification_method(issuer_id)
# Get signature class based on proof type
SignatureClass = self.PROOF_TYPE_SIGNATURE_SUITE_MAPPING[self.proof_type]
# Generically create signature class
return SignatureClass(
verification_method=verification_method,
key_pair=WalletKeyPair(
wallet=wallet,
key_type=self.ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass],
public_key_base58=did_info.verkey if did_info else None,
),
)
async def _get_derive_suite(
self,
*,
wallet: BaseWallet,
):
"""Get signature suite for deriving credentials."""
# Get signature class based on proof type
SignatureClass = self.DERIVED_PROOF_TYPE_SIGNATURE_SUITE_MAPPING[
"BbsBlsSignatureProof2020"
]
# Generically create signature class
return SignatureClass(
key_pair=WalletKeyPair(
wallet=wallet,
key_type=self.DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass],
),
)
def _get_verification_method(self, did: str):
"""Get the verification method for a did."""
if did.startswith("did:key:"):
return DIDKey.from_did(did).key_id
elif did.startswith("did:sov:"):
# key-1 is what uniresolver uses for key id
return did + "#key-1"
else:
            raise DIFPresExchError(
                f"Unable to retrieve verification method for did {did}"
            )
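    # Illustrative examples for _get_verification_method above (identifiers are made
    # up): "did:sov:LjgpST2rjsoxYegQDRm7EL" maps to
    # "did:sov:LjgpST2rjsoxYegQDRm7EL#key-1", while a did:key DID is resolved through
    # DIDKey.from_did(...).key_id, which uses the key fingerprint as the fragment.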
async def _did_info_for_did(self, did: str) -> DIDInfo:
"""Get the did info for specified did.
If the did starts with did:sov it will remove the prefix for
backwards compatibility with not fully qualified did.
Args:
did (str): The did to retrieve from the wallet.
Raises:
WalletNotFoundError: If the did is not found in the wallet.
Returns:
DIDInfo: did information
"""
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
# If the did starts with did:sov we need to query without
if did.startswith("did:sov:"):
return await wallet.get_local_did(did.replace("did:sov:", ""))
# All other methods we can just query
return await wallet.get_local_did(did)
async def get_sign_key_credential_subject_id(
self, applicable_creds: Sequence[VCRecord]
) -> Tuple[Optional[str], Sequence[dict]]:
"""Get the issuer_id and filtered_creds from enclosed credentials subject_ids."""
issuer_id = None
filtered_creds_list = []
if self.proof_type == BbsBlsSignature2020.signature_type:
reqd_key_type = KeyType.BLS12381G2
else:
reqd_key_type = KeyType.ED25519
for cred in applicable_creds:
if cred.subject_ids and len(cred.subject_ids) > 0:
if not issuer_id:
for cred_subject_id in cred.subject_ids:
if not cred_subject_id.startswith("urn:"):
did_info = await self._did_info_for_did(cred_subject_id)
if did_info.key_type == reqd_key_type:
issuer_id = cred_subject_id
filtered_creds_list.append(cred.cred_value)
break
else:
if issuer_id in cred.subject_ids:
filtered_creds_list.append(cred.cred_value)
else:
raise DIFPresExchError(
"Applicable credentials have different credentialSubject.id, "
"multiple proofs are not supported currently"
)
return (issuer_id, filtered_creds_list)
async def to_requirement(
self, sr: SubmissionRequirements, descriptors: Sequence[InputDescriptors]
) -> Requirement:
"""
Return Requirement.
Args:
sr: submission_requirement
descriptors: list of input_descriptors
Raises:
DIFPresExchError: If not able to create requirement
"""
input_descriptors = []
nested = []
total_count = 0
if sr._from:
if sr._from != "":
for descriptor in descriptors:
if self.contains(descriptor.groups, sr._from):
input_descriptors.append(descriptor)
total_count = len(input_descriptors)
if total_count == 0:
raise DIFPresExchError(f"No descriptors for from: {sr._from}")
else:
for submission_requirement in sr.from_nested:
try:
# recursion logic
requirement = await self.to_requirement(
submission_requirement, descriptors
)
nested.append(requirement)
except Exception as err:
raise DIFPresExchError(
(
"Error creating requirement from "
f"nested submission_requirements, {err}"
)
)
total_count = len(nested)
count = sr.count
if sr.rule == "all":
count = total_count
requirement = Requirement(
count=count,
maximum=sr.maximum,
minimum=sr.minimum,
input_descriptors=input_descriptors,
nested_req=nested,
)
return requirement
async def make_requirement(
self,
srs: Sequence[SubmissionRequirements] = None,
descriptors: Sequence[InputDescriptors] = None,
) -> Requirement:
"""
Return Requirement.
Creates and return Requirement with nesting if required
using to_requirement()
Args:
srs: list of submission_requirements
descriptors: list of input_descriptors
Raises:
DIFPresExchError: If not able to create requirement
"""
if not srs:
srs = []
if not descriptors:
descriptors = []
if len(srs) == 0:
requirement = Requirement(
count=len(descriptors),
input_descriptors=descriptors,
)
return requirement
requirement = Requirement(
count=len(srs),
nested_req=[],
)
for submission_requirement in srs:
try:
requirement.nested_req.append(
await self.to_requirement(submission_requirement, descriptors)
)
except Exception as err:
raise DIFPresExchError(
f"Error creating requirement inside to_requirement function, {err}"
)
return requirement
def is_len_applicable(self, req: Requirement, val: int) -> bool:
"""
Check and validate requirement minimum, maximum and count.
Args:
req: Requirement
val: int value to check
Return:
bool
"""
if req.count:
if req.count > 0 and val != req.count:
return False
if req.minimum:
if req.minimum > 0 and req.minimum > val:
return False
if req.maximum:
if req.maximum > 0 and req.maximum < val:
return False
return True
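    # Illustrative examples for is_len_applicable above (Requirement values are made
    # up): with req.count == 2, val == 2 passes and val == 3 fails; with
    # req.minimum == 1 and req.maximum == 3, val == 2 passes while val == 0 or
    # val == 4 fails.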
def contains(self, data: Sequence[str], e: str) -> bool:
"""
Check for e in data.
Returns True if e exists in data else return False
Args:
data: Sequence of str
e: str value to check
Return:
bool
"""
data_list = list(data) if data else []
for k in data_list:
if e == k:
return True
return False
async def filter_constraints(
self,
constraints: Constraints,
credentials: Sequence[VCRecord],
) -> Sequence[VCRecord]:
"""
Return list of applicable VCRecords after applying filtering.
Args:
constraints: Constraints
credentials: Sequence of credentials
to apply filtering on
Return:
Sequence of applicable VCRecords
"""
document_loader = self.profile.inject(DocumentLoader)
result = []
for credential in credentials:
if constraints.subject_issuer == "required" and not self.subject_is_issuer(
credential=credential
):
continue
applicable = False
is_holder_field_ids = self.field_ids_for_is_holder(constraints)
for field in constraints._fields:
applicable = await self.filter_by_field(field, credential)
# all fields in the constraint should be satisfied
if not applicable:
break
# is_holder with required directive requested for this field
if applicable and field.id and field.id in is_holder_field_ids:
# Missing credentialSubject.id - cannot verify that holder of claim
# is same as subject
if not credential.subject_ids or len(credential.subject_ids) == 0:
applicable = False
break
# Holder of claim is not same as the subject
if not await self.process_constraint_holders(
subject_ids=credential.subject_ids
):
applicable = False
break
if not applicable:
continue
if constraints.limit_disclosure == "required":
credential_dict = credential.cred_value
new_credential_dict = self.reveal_doc(
credential_dict=credential_dict, constraints=constraints
)
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
derive_suite = await self._get_derive_suite(
wallet=wallet,
)
signed_new_credential_dict = await derive_credential(
credential=credential_dict,
reveal_document=new_credential_dict,
suite=derive_suite,
document_loader=document_loader,
)
credential = self.create_vcrecord(signed_new_credential_dict)
result.append(credential)
return result
def field_ids_for_is_holder(self, constraints: Constraints) -> Sequence[str]:
"""Return list of field ids for whose subject holder verification is requested."""
reqd_field_ids = set()
if not constraints.holders:
reqd_field_ids = []
return reqd_field_ids
for holder in constraints.holders:
if holder.directive == "required":
reqd_field_ids = set.union(reqd_field_ids, set(holder.field_ids))
return list(reqd_field_ids)
async def process_constraint_holders(
self,
subject_ids: Sequence[str],
) -> bool:
"""Check if holder or subject of claim still controls the identifier."""
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
try:
for subject_id in subject_ids:
await wallet.get_local_did(subject_id.replace("did:sov:", ""))
self.is_holder = True
return True
except (WalletError, WalletNotFoundError):
return False
def create_vcrecord(self, cred_dict: dict) -> VCRecord:
"""Return VCRecord from a credential dict."""
proofs = cred_dict.get("proof") or []
proof_types = None
if type(proofs) is dict:
proofs = [proofs]
if proofs:
proof_types = [proof.get("type") for proof in proofs]
contexts = [ctx for ctx in cred_dict.get("@context") if type(ctx) is str]
if "@graph" in cred_dict:
for enclosed_data in cred_dict.get("@graph"):
if (
enclosed_data["id"].startswith("urn:")
and "credentialSubject" in enclosed_data
):
cred_dict.update(enclosed_data)
del cred_dict["@graph"]
break
given_id = cred_dict.get("id")
if given_id and self.check_if_cred_id_derived(given_id):
given_id = str(uuid4())
# issuer
issuer = cred_dict.get("issuer")
if type(issuer) is dict:
issuer = issuer.get("id")
# subjects
subject_ids = None
subjects = cred_dict.get("credentialSubject")
if subjects:
if type(subjects) is dict:
subjects = [subjects]
subject_ids = [
subject.get("id") for subject in subjects if ("id" in subject)
]
else:
cred_dict["credentialSubject"] = {}
# Schemas
schemas = cred_dict.get("credentialSchema", [])
if type(schemas) is dict:
schemas = [schemas]
schema_ids = [schema.get("id") for schema in schemas]
expanded = jsonld.expand(cred_dict)
types = JsonLdProcessor.get_values(
expanded[0],
"@type",
)
return VCRecord(
contexts=contexts,
expanded_types=types,
issuer_id=issuer,
subject_ids=subject_ids,
proof_types=proof_types,
given_id=given_id,
cred_value=cred_dict,
schema_ids=schema_ids,
)
def reveal_doc(self, credential_dict: dict, constraints: Constraints):
"""Generate reveal_doc dict for deriving credential."""
derived = {
"@context": credential_dict.get("@context"),
"type": credential_dict.get("type"),
"@explicit": True,
"@requireAll": True,
"issuanceDate": {},
"issuer": {},
}
unflatten_dict = {}
for field in constraints._fields:
for path in field.paths:
jsonpath = parse(path)
match = jsonpath.find(credential_dict)
if len(match) == 0:
continue
for match_item in match:
full_path = str(match_item.full_path)
if bool(re.search(pattern=r"\[[0-9]+\]", string=full_path)):
full_path = full_path.replace(".[", "[")
unflatten_dict[full_path] = {}
explicit_key_path = None
key_list = full_path.split(".")[:-1]
for key in key_list:
if not explicit_key_path:
explicit_key_path = key
else:
explicit_key_path = explicit_key_path + "." + key
unflatten_dict[explicit_key_path + ".@explicit"] = True
unflatten_dict[explicit_key_path + ".@requireAll"] = True
derived = self.new_credential_builder(derived, unflatten_dict)
# Fix issue related to credentialSubject type property
if "credentialSubject" in derived.keys():
if "type" in credential_dict.get("credentialSubject"):
derived["credentialSubject"]["type"] = credential_dict.get(
"credentialSubject"
).get("type")
if "credentialSubject" not in derived.keys():
if isinstance(credential_dict.get("credentialSubject"), list):
derived["credentialSubject"] = []
elif isinstance(credential_dict.get("credentialSubject"), dict):
derived["credentialSubject"] = {}
return derived
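    # Illustrative example for reveal_doc above (path and claim name are made up): a
    # constraint field with path "$.credentialSubject.givenName" produces the
    # unflatten keys "credentialSubject.givenName", "credentialSubject.@explicit" and
    # "credentialSubject.@requireAll", so the derived frame reveals only that claim
    # while keeping @context, type, issuer and issuanceDate from the base document.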
def new_credential_builder(
self, new_credential: dict, unflatten_dict: dict
) -> dict:
"""
Update and return the new_credential.
Args:
new_credential: credential dict to be updated and returned
unflatten_dict: dict with traversal path as key and match_value as value
Return:
dict
"""
new_credential.update(unflatten(unflatten_dict))
return new_credential
async def filter_by_field(self, field: DIFField, credential: VCRecord) -> bool:
"""
Apply filter on VCRecord.
Checks if a credential is applicable
Args:
field: Field contains filtering spec
credential: credential to apply filtering on
Return:
bool
"""
credential_dict = credential.cred_value
for path in field.paths:
if "$.proof." in path:
raise DIFPresExchError(
"JSON Path expression matching on proof object "
"is not currently supported"
)
jsonpath = parse(path)
match = jsonpath.find(credential_dict)
if len(match) == 0:
continue
for match_item in match:
# No filter in constraint
if not field._filter:
return True
if self.validate_patch(match_item.value, field._filter):
return True
return False
def validate_patch(self, to_check: any, _filter: Filter) -> bool:
"""
Apply filter on match_value.
Utility function used in applying filtering to a cred
by triggering checks according to filter specification
Args:
to_check: value to check, extracted from match
_filter: Filter
Return:
bool
"""
return_val = False
if _filter._type:
if self.check_filter_only_type_enforced(_filter):
if _filter._type == "number":
if isinstance(to_check, (int, float)):
return True
elif _filter._type == "string":
if isinstance(to_check, str):
if _filter.fmt == "date" or _filter.fmt == "date-time":
try:
to_compare_date = dateutil_parser(to_check)
if isinstance(to_compare_date, datetime):
return True
except (ParserError, TypeError):
return False
else:
return True
else:
if _filter._type == "number":
return_val = self.process_numeric_val(to_check, _filter)
elif _filter._type == "string":
return_val = self.process_string_val(to_check, _filter)
else:
if _filter.enums:
return_val = self.enum_check(val=to_check, _filter=_filter)
if _filter.const:
return_val = self.const_check(val=to_check, _filter=_filter)
if _filter._not:
return not return_val
return return_val
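    # Illustrative examples for validate_patch above (filter values are made up): a
    # filter carrying only _type == "number" accepts any int or float; a filter with
    # _type == "string", fmt == "date" and minimum == "2010-01-01" accepts
    # "2012-07-19" because the parsed date is on or after the minimum; setting _not
    # inverts whichever result the other checks produce.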
def check_filter_only_type_enforced(self, _filter: Filter) -> bool:
"""
Check if only type is specified in filter.
Args:
_filter: Filter
Return:
bool
"""
if (
_filter.pattern is None
and _filter.minimum is None
and _filter.maximum is None
and _filter.min_length is None
and _filter.max_length is None
and _filter.exclusive_min is None
and _filter.exclusive_max is None
and _filter.const is None
and _filter.enums is None
):
return True
else:
return False
def process_numeric_val(self, val: any, _filter: Filter) -> bool:
"""
Trigger Filter checks.
Trigger appropriate check for a number type filter,
according to _filter spec.
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
if _filter.exclusive_max:
return self.exclusive_maximum_check(val, _filter)
elif _filter.exclusive_min:
return self.exclusive_minimum_check(val, _filter)
elif _filter.minimum:
return self.minimum_check(val, _filter)
elif _filter.maximum:
return self.maximum_check(val, _filter)
elif _filter.const:
return self.const_check(val, _filter)
elif _filter.enums:
return self.enum_check(val, _filter)
else:
return False
def process_string_val(self, val: any, _filter: Filter) -> bool:
"""
Trigger Filter checks.
Trigger appropriate check for a string type filter,
according to _filter spec.
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
if _filter.min_length or _filter.max_length:
return self.length_check(val, _filter)
elif _filter.pattern:
return self.pattern_check(val, _filter)
elif _filter.enums:
return self.enum_check(val, _filter)
elif _filter.exclusive_max and _filter.fmt:
return self.exclusive_maximum_check(val, _filter)
elif _filter.exclusive_min and _filter.fmt:
return self.exclusive_minimum_check(val, _filter)
elif _filter.minimum and _filter.fmt:
return self.minimum_check(val, _filter)
elif _filter.maximum and _filter.fmt:
return self.maximum_check(val, _filter)
elif _filter.const:
return self.const_check(val, _filter)
else:
return False
def exclusive_minimum_check(self, val: any, _filter: Filter) -> bool:
"""
        Exclusive minimum check.
        Returns True if the value is strictly greater than the filter's exclusive minimum
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
try:
if _filter.fmt:
utc = pytz.UTC
if _filter.fmt == "date" or _filter.fmt == "date-time":
to_compare_date = dateutil_parser(_filter.exclusive_min).replace(
tzinfo=utc
)
given_date = dateutil_parser(str(val)).replace(tzinfo=utc)
return given_date > to_compare_date
else:
if self.is_numeric(val):
return val > _filter.exclusive_min
return False
except (TypeError, ValueError, ParserError):
return False
def exclusive_maximum_check(self, val: any, _filter: Filter) -> bool:
"""
        Exclusive maximum check.
        Returns True if the value is strictly less than the filter's exclusive maximum
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
try:
if _filter.fmt:
utc = pytz.UTC
if _filter.fmt == "date" or _filter.fmt == "date-time":
to_compare_date = dateutil_parser(_filter.exclusive_max).replace(
tzinfo=utc
)
given_date = dateutil_parser(str(val)).replace(tzinfo=utc)
return given_date < to_compare_date
else:
if self.is_numeric(val):
return val < _filter.exclusive_max
return False
except (TypeError, ValueError, ParserError):
return False
def maximum_check(self, val: any, _filter: Filter) -> bool:
"""
Maximum check.
        Returns True if the value is less than or equal to the filter's maximum
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
try:
if _filter.fmt:
utc = pytz.UTC
if _filter.fmt == "date" or _filter.fmt == "date-time":
to_compare_date = dateutil_parser(_filter.maximum).replace(
tzinfo=utc
)
given_date = dateutil_parser(str(val)).replace(tzinfo=utc)
return given_date <= to_compare_date
else:
if self.is_numeric(val):
return val <= _filter.maximum
return False
except (TypeError, ValueError, ParserError):
return False
def minimum_check(self, val: any, _filter: Filter) -> bool:
"""
Minimum check.
        Returns True if the value is greater than or equal to the filter's minimum
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
try:
if _filter.fmt:
utc = pytz.UTC
if _filter.fmt == "date" or _filter.fmt == "date-time":
to_compare_date = dateutil_parser(_filter.minimum).replace(
tzinfo=utc
)
given_date = dateutil_parser(str(val)).replace(tzinfo=utc)
return given_date >= to_compare_date
else:
if self.is_numeric(val):
return val >= _filter.minimum
return False
except (TypeError, ValueError, ParserError):
return False
def length_check(self, val: any, _filter: Filter) -> bool:
"""
Length check.
        Returns True if the length of the value string meets the minLength and maxLength specs
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
given_len = len(str(val))
if _filter.max_length and _filter.min_length:
if given_len <= _filter.max_length and given_len >= _filter.min_length:
return True
elif _filter.max_length and not _filter.min_length:
if given_len <= _filter.max_length:
return True
elif not _filter.max_length and _filter.min_length:
if given_len >= _filter.min_length:
return True
return False
def pattern_check(self, val: any, _filter: Filter) -> bool:
"""
Pattern check.
Returns True if value string matches the specified pattern
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
if _filter.pattern:
return bool(re.search(pattern=_filter.pattern, string=str(val)))
return False
def const_check(self, val: any, _filter: Filter) -> bool:
"""
Const check.
        Returns True if the value equals the filter's const value
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
if val == _filter.const:
return True
return False
def enum_check(self, val: any, _filter: Filter) -> bool:
"""
Enum check.
        Returns True if the value is contained in the filter's enum list
Args:
val: value to check, extracted from match
_filter: Filter
Return:
bool
"""
if val in _filter.enums:
return True
return False
def subject_is_issuer(self, credential: VCRecord) -> bool:
"""
subject_is_issuer check.
Returns True if cred issuer_id is in subject_ids
Args:
credential: VCRecord
Return:
bool
"""
subject_ids = credential.subject_ids
for subject_id in subject_ids:
issuer_id = credential.issuer_id
if subject_id != "" and subject_id == issuer_id:
return True
return False
async def filter_schema(
self, credentials: Sequence[VCRecord], schemas: Sequence[SchemaInputDescriptor]
) -> Sequence[VCRecord]:
"""
Filter by schema.
Returns list of credentials where credentialSchema.id or types matched
with input_descriptors.schema.uri
Args:
credentials: list of VCRecords to check
schemas: list of schemas from the input_descriptors
Return:
Sequence of filtered VCRecord
"""
result = []
for credential in credentials:
applicable = False
for schema in schemas:
applicable = self.credential_match_schema(
credential=credential, schema_id=schema.uri
)
if schema.required and not applicable:
break
if applicable:
if schema.uri in [
EXPANDED_TYPE_CREDENTIALS_CONTEXT_V1_VC_TYPE,
]:
continue
else:
break
if applicable:
result.append(credential)
return result
def credential_match_schema(self, credential: VCRecord, schema_id: str) -> bool:
"""
Credential matching by schema.
Used by filter_schema to check if credential.schema_ids or credential.types
matched with schema_id
Args:
credential: VCRecord to check
schema_id: schema uri to check
Return:
bool
"""
if schema_id in credential.schema_ids:
return True
if schema_id in credential.expanded_types:
return True
return False
async def filter_creds_record_id(
self, credentials: Sequence[VCRecord], records_list: Sequence[str]
) -> Sequence[VCRecord]:
"""Return filtered list of credentials using records_list."""
filtered_cred = []
for credential in credentials:
if credential.record_id in records_list:
filtered_cred.append(credential)
return filtered_cred
async def apply_requirements(
self,
req: Requirement,
credentials: Sequence[VCRecord],
records_filter: dict = None,
) -> dict:
"""
Apply Requirement.
Args:
req: Requirement
credentials: Sequence of credentials to check against
Return:
dict of input_descriptor ID key to list of credential_json
"""
# Dict for storing descriptor_id keys and list of applicable
# credentials values
result = {}
# Get all input_descriptors attached to the PresentationDefinition
descriptor_list = req.input_descriptors or []
for descriptor in descriptor_list:
# Filter credentials to apply filtering
# upon by matching each credentialSchema.id
# or expanded types on each InputDescriptor's schema URIs
if records_filter and (descriptor.id in records_filter):
filtered_creds_by_descriptor_id = await self.filter_creds_record_id(
credentials, records_filter.get(descriptor.id)
)
filtered_by_schema = await self.filter_schema(
credentials=filtered_creds_by_descriptor_id,
schemas=descriptor.schemas,
)
else:
filtered_by_schema = await self.filter_schema(
credentials=credentials, schemas=descriptor.schemas
)
# Filter credentials based upon path expressions specified in constraints
filtered = await self.filter_constraints(
constraints=descriptor.constraint,
credentials=filtered_by_schema,
)
if len(filtered) != 0:
result[descriptor.id] = filtered
if len(descriptor_list) != 0:
# Applies min, max or count attributes of submission_requirement
if self.is_len_applicable(req, len(result)):
return result
return {}
nested_result = []
cred_uid_descriptors = {}
# recursion logic for nested requirements
for requirement in req.nested_req:
# recursive call
result = await self.apply_requirements(
requirement, credentials, records_filter
)
if result == {}:
continue
# cred_uid_descriptors maps applicable credentials
# to their respective descriptor.
# Structure: {cred.given_id: {
# desc_id_1: {}
# },
# ......
# }
# This will be used to construct exclude dict.
for descriptor_id in result.keys():
credential_list = result.get(descriptor_id)
for credential in credential_list:
cred_id = credential.given_id or credential.record_id
if cred_id:
cred_uid_descriptors.setdefault(cred_id, {})[descriptor_id] = {}
if len(result.keys()) != 0:
nested_result.append(result)
exclude = {}
for uid in cred_uid_descriptors.keys():
# Check if number of applicable credentials
# does not meet requirement specification
if not self.is_len_applicable(req, len(cred_uid_descriptors[uid])):
for descriptor_id in cred_uid_descriptors[uid]:
# Add to exclude dict
# with cred_uid + descriptor_id as key
exclude[descriptor_id + uid] = {}
# merging credentials and excluding credentials that don't satisfy the requirement
return await self.merge_nested_results(
nested_result=nested_result, exclude=exclude
)
def is_numeric(self, val: any) -> bool:
"""
Check if val is an int or float.
Args:
val: to check
Return:
bool
"""
if isinstance(val, float) or isinstance(val, int):
return True
else:
return False
async def merge_nested_results(
self, nested_result: Sequence[dict], exclude: dict
) -> dict:
"""
Merge nested results with merged credentials.
Args:
nested_result: Sequence of dict containing input_descriptor.id as keys
and list of creds as values
exclude: dict containing info about credentials to exclude
Return:
dict with input_descriptor.id as keys and merged_credentials_list as values
"""
result = {}
for res in nested_result:
for key in res.keys():
credentials = res[key]
uid_dict = {}
merged_credentials = []
if key in result:
for credential in result[key]:
cred_id = credential.given_id or credential.record_id
if cred_id and cred_id not in uid_dict:
merged_credentials.append(credential)
uid_dict[cred_id] = {}
for credential in credentials:
cred_id = credential.given_id or credential.record_id
if cred_id and cred_id not in uid_dict:
if (key + cred_id) not in exclude:
merged_credentials.append(credential)
uid_dict[cred_id] = {}
result[key] = merged_credentials
return result
async def create_vp(
self,
credentials: Sequence[VCRecord],
pd: PresentationDefinition,
challenge: str = None,
domain: str = None,
records_filter: dict = None,
) -> dict:
"""
Create VerifiablePresentation.
Args:
credentials: Sequence of VCRecords
pd: PresentationDefinition
Return:
VerifiablePresentation
"""
document_loader = self.profile.inject(DocumentLoader)
req = await self.make_requirement(
srs=pd.submission_requirements, descriptors=pd.input_descriptors
)
result = await self.apply_requirements(
req=req, credentials=credentials, records_filter=records_filter
)
applicable_creds, descriptor_maps = await self.merge(result)
applicable_creds_list = []
for credential in applicable_creds:
applicable_creds_list.append(credential.cred_value)
if (
not self.profile.settings.get("debug.auto_respond_presentation_request")
and not records_filter
and len(applicable_creds_list) > 1
):
raise DIFPresExchError(
"Multiple credentials are applicable for presentation_definition "
f"{pd.id} and --auto-respond-presentation-request setting is not "
"enabled. Please specify which credentials should be applied to "
"which input_descriptors using record_ids filter."
)
# submission_property
submission_property = PresentationSubmission(
id=str(uuid4()), definition_id=pd.id, descriptor_maps=descriptor_maps
)
if self.is_holder:
(
issuer_id,
filtered_creds_list,
) = await self.get_sign_key_credential_subject_id(
applicable_creds=applicable_creds
)
if not issuer_id and len(filtered_creds_list) == 0:
vp = await create_presentation(credentials=applicable_creds_list)
vp["presentation_submission"] = submission_property.serialize()
if self.proof_type is BbsBlsSignature2020.signature_type:
vp["@context"].append(SECURITY_CONTEXT_BBS_URL)
return vp
else:
vp = await create_presentation(credentials=filtered_creds_list)
vp["presentation_submission"] = submission_property.serialize()
if self.proof_type is BbsBlsSignature2020.signature_type:
vp["@context"].append(SECURITY_CONTEXT_BBS_URL)
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
issue_suite = await self._get_issue_suite(
wallet=wallet,
issuer_id=issuer_id,
)
signed_vp = await sign_presentation(
presentation=vp,
suite=issue_suite,
challenge=challenge,
document_loader=document_loader,
)
return signed_vp
else:
vp = await create_presentation(credentials=applicable_creds_list)
vp["presentation_submission"] = submission_property.serialize()
if self.proof_type is BbsBlsSignature2020.signature_type:
vp["@context"].append(SECURITY_CONTEXT_BBS_URL)
if self.pres_signing_did:
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
issue_suite = await self._get_issue_suite(
wallet=wallet,
issuer_id=self.pres_signing_did,
)
signed_vp = await sign_presentation(
presentation=vp,
suite=issue_suite,
challenge=challenge,
document_loader=document_loader,
)
return signed_vp
else:
return vp
def check_if_cred_id_derived(self, id: str) -> bool:
"""Check if credential or credentialSubjet id is derived."""
if id.startswith("urn:bnid:_:c14n"):
return True
return False
async def merge(
self,
dict_descriptor_creds: dict,
) -> (Sequence[VCRecord], Sequence[InputDescriptorMapping]):
"""
Return applicable credentials and descriptor_map for attachment.
Used for generating the presentation_submission property with the
        descriptor_map, maintaining the order in which applicable credential
list is returned.
Args:
dict_descriptor_creds: dict with input_descriptor.id as keys
and merged_credentials_list
Return:
Tuple of applicable credential list and descriptor map
"""
dict_of_creds = {}
dict_of_descriptors = {}
result = []
descriptors = []
sorted_desc_keys = sorted(list(dict_descriptor_creds.keys()))
for desc_id in sorted_desc_keys:
credentials = dict_descriptor_creds.get(desc_id)
for cred in credentials:
cred_id = cred.given_id or cred.record_id
if cred_id:
if cred_id not in dict_of_creds:
result.append(cred)
dict_of_creds[cred_id] = len(descriptors)
if f"{cred_id}-{cred_id}" not in dict_of_descriptors:
descriptor_map = InputDescriptorMapping(
id=desc_id,
fmt="ldp_vp",
path=(f"$.verifiableCredential[{dict_of_creds[cred_id]}]"),
)
descriptors.append(descriptor_map)
descriptors = sorted(descriptors, key=lambda i: i.id)
return (result, descriptors)
async def verify_received_pres(
self,
pd: PresentationDefinition,
pres: dict,
):
"""
Verify credentials received in presentation.
Args:
pres: received VerifiablePresentation
pd: PresentationDefinition
"""
descriptor_map_list = pres.get("presentation_submission").get("descriptor_map")
input_descriptors = pd.input_descriptors
        inp_desc_id_constraint_map = {}
        for input_descriptor in input_descriptors:
            inp_desc_id_constraint_map[input_descriptor.id] = input_descriptor.constraint
        for desc_map_item in descriptor_map_list:
            desc_map_item_id = desc_map_item.get("id")
            constraint = inp_desc_id_constraint_map.get(desc_map_item_id)
desc_map_item_path = desc_map_item.get("path")
jsonpath = parse(desc_map_item_path)
match = jsonpath.find(pres)
if len(match) == 0:
raise DIFPresExchError(
f"{desc_map_item_path} path in descriptor_map not applicable"
)
for match_item in match:
if not await self.apply_constraint_received_cred(
constraint, match_item.value
):
raise DIFPresExchError(
f"Constraint specified for {desc_map_item_id} does not "
f"apply to the enclosed credential in {desc_map_item_path}"
)
async def apply_constraint_received_cred(
self, constraint: Constraints, cred_dict: dict
) -> bool:
"""Evaluate constraint from the request against received credential."""
fields = constraint._fields
field_paths = []
credential = self.create_vcrecord(cred_dict)
for field in fields:
field_paths = field_paths + field.paths
if not await self.filter_by_field(field, credential):
return False
# Selective Disclosure check
if constraint.limit_disclosure == "required":
field_paths = set([path.replace("$.", "") for path in field_paths])
mandatory_paths = {
"@context",
"type",
"issuanceDate",
"issuer",
"proof",
"credentialSubject",
"id",
}
to_remove_from_field_paths = set()
nested_field_paths = {"credentialSubject": {"id", "type"}}
for field_path in field_paths:
if field_path.count(".") >= 1:
split_field_path = field_path.split(".")
key = ".".join(split_field_path[:-1])
value = split_field_path[-1]
nested_field_paths = self.build_nested_paths_dict(
key, value, nested_field_paths
)
to_remove_from_field_paths.add(field_path)
for to_remove_path in to_remove_from_field_paths:
field_paths.remove(to_remove_path)
field_paths = set.union(mandatory_paths, field_paths)
for attrs in cred_dict.keys():
if attrs not in field_paths:
return False
for nested_attr_key in nested_field_paths:
nested_attr_values = nested_field_paths[nested_attr_key]
split_nested_attr_key = nested_attr_key.split(".")
extracted_dict = self.nested_get(cred_dict, split_nested_attr_key)
for attrs in extracted_dict.keys():
if attrs not in nested_attr_values:
return False
return True
def nested_get(self, input_dict: dict, nested_key: Sequence[str]) -> dict:
"""Return internal dict from nested input_dict given list of nested_key."""
internal_dict_value = input_dict
for k in nested_key:
internal_dict_value = internal_dict_value.get(k, None)
return internal_dict_value
def build_nested_paths_dict(
self, key: str, value: str, nested_field_paths: dict
) -> dict:
"""Build and return nested_field_paths dict."""
if key in nested_field_paths.keys():
nested_field_paths[key].add(value)
else:
nested_field_paths[key] = {value}
split_key = key.split(".")
if len(split_key) > 1:
nested_field_paths.update(
self.build_nested_paths_dict(
".".join(split_key[:-1]), split_key[-1], nested_field_paths
)
)
return nested_field_paths
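        # Illustration with hypothetical field paths: starting from an empty dict,
        #   build_nested_paths_dict("credentialSubject", "givenName", {})
        # returns {"credentialSubject": {"givenName"}}; a two-level key such as
        # "credentialSubject.address" with value "city" additionally records
        # {"credentialSubject": {"address"}} through the recursive call.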
| 36.267666 | 90 | 0.564838 |
40d86ed0051a0fe9cfc6afac4035541b46994d05
| 1,768 |
py
|
Python
|
aliyun-python-sdk-sae/aliyunsdksae/request/v20190506/BindSlbRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-sae/aliyunsdksae/request/v20190506/BindSlbRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-sae/aliyunsdksae/request/v20190506/BindSlbRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdksae.endpoint import endpoint_data
class BindSlbRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'sae', '2019-05-06', 'BindSlb','serverless')
self.set_uri_pattern('/pop/v1/sam/app/slb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Intranet(self):
return self.get_query_params().get('Intranet')
def set_Intranet(self,Intranet):
self.add_query_param('Intranet',Intranet)
def get_AppId(self):
return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId)
def get_Internet(self):
return self.get_query_params().get('Internet')
def set_Internet(self,Internet):
self.add_query_param('Internet',Internet)
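# Sketch of typical usage with the core SDK client; the region, credentials and
# the Internet payload below are placeholders, not part of this module:
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = BindSlbRequest()
#   request.set_AppId('<sae-app-id>')
#   request.set_Internet('[{"port":80,"targetPort":8080,"protocol":"TCP"}]')
#   print(client.do_action_with_exception(request))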
| 34.666667 | 74 | 0.755656 |
e1abdcc1019ab9ce190091e88f7d85b59d9eec99
| 1,453 |
py
|
Python
|
src/weather_lk/tweet.py
|
nuuuwan/weather_lk
|
f06e92bae9e77c78a3093b31f28c7944f960580f
|
[
"MIT"
] | null | null | null |
src/weather_lk/tweet.py
|
nuuuwan/weather_lk
|
f06e92bae9e77c78a3093b31f28c7944f960580f
|
[
"MIT"
] | null | null | null |
src/weather_lk/tweet.py
|
nuuuwan/weather_lk
|
f06e92bae9e77c78a3093b31f28c7944f960580f
|
[
"MIT"
] | null | null | null |
"""Tweet."""
from utils import timex, twitter
from weather_lk import PlotRain, PlotTemp, daily_weather_report
def _hash(_s):
return '#' + _s.replace(' ', '')
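# e.g. _hash("Nuwara Eliya") -> "#NuwaraEliya" (place name is illustrative)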
def _tweet():
date_id = timex.get_date_id()
data = daily_weather_report.load(date_id)
if data['max_rain']['rain'] >= 0.1:
rain_str = (
'''Highest: {max_rain_place} ({max_rain_rain:.1f}mm)'''.format(
max_rain_place=_hash(data['max_rain']['place']),
max_rain_rain=data['max_rain']['rain'],
)
)
else:
rain_str = 'No rain >0.1mm islandwide.'
tweet_text = '''Temperature & Rainfall ({date}) by @MeteoLK
Rainfall 🌧️
😅 {rain_str}
Temperature 🌡️
🥵 Highest: {max_temp_place} ({max_temp_temp:.1f}°C)
🥶 Lowest: {min_temp_place} ({min_temp_temp:.1f}°C)
(24hrs ending at 8.30am)
#lka #SriLanka'''.format(
date=data['date'],
max_temp_place=_hash(data['max_temp']['place']),
min_temp_place=_hash(data['min_temp']['place']),
max_temp_temp=data['max_temp']['temp'],
min_temp_temp=data['min_temp']['temp'],
rain_str=rain_str,
)
status_image_files = [
PlotTemp._plot(date_id),
PlotRain._plot(date_id),
]
twtr = twitter.Twitter.from_args()
twtr.tweet(
tweet_text=tweet_text,
status_image_files=status_image_files,
update_user_profile=True,
)
if __name__ == '__main__':
_tweet()
| 23.435484 | 75 | 0.606332 |
4ee359f215671a7d5effad86d4be47118ae0fc03
| 121 |
py
|
Python
|
BitTorrent-5.2.2/BitTorrent/Storage.py
|
jpabb7/p2pScrapper
|
0fd57049606864223eb45f956a58adda1231af88
|
[
"MIT"
] | 4 |
2016-04-26T03:43:54.000Z
|
2016-11-17T08:09:04.000Z
|
BitTorrent-5.2.2/BitTorrent/Storage.py
|
jpabb7/p2pScrapper
|
0fd57049606864223eb45f956a58adda1231af88
|
[
"MIT"
] | 17 |
2015-01-05T21:06:22.000Z
|
2015-12-07T20:45:44.000Z
|
BitTorrent-5.2.2/BitTorrent/Storage.py
|
jpabb7/p2pScrapper
|
0fd57049606864223eb45f956a58adda1231af88
|
[
"MIT"
] | 7 |
2015-07-28T09:17:17.000Z
|
2021-11-07T02:29:41.000Z
|
# pick a Storage subsystem
try:
from Storage_IOCP import *
except Exception, e:
from Storage_threadpool import *
| 20.166667 | 36 | 0.743802 |
1b111370fffda4bfd547bfc12f9edcd8894dc2bd
| 12,998 |
py
|
Python
|
var/spack/repos/builtin/packages/qgis/package.py
|
bjoo/spack
|
448ac2c68dc3d11331f7d20ab9b87d63fbabdb86
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 |
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
var/spack/repos/builtin/packages/qgis/package.py
|
bjoo/spack
|
448ac2c68dc3d11331f7d20ab9b87d63fbabdb86
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13 |
2021-05-12T06:16:20.000Z
|
2022-03-11T18:39:32.000Z
|
var/spack/repos/builtin/packages/qgis/package.py
|
rubendibattista/spack
|
91de23ce650ef4dd007b94f67c26e1e6901be354
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 |
2018-04-06T09:04:11.000Z
|
2020-01-24T12:52:12.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qgis(CMakePackage):
"""QGIS is a free and open-source cross-platform desktop geographic
information system application that supports viewing, editing, and
analysis of geospatial data.
"""
homepage = "https://qgis.org"
url = "https://qgis.org/downloads/qgis-3.8.1.tar.bz2"
maintainers = ['adamjstewart', 'Sinan81']
version('3.18.2', sha256='1913e4d5596bbc8b7d143f3defb18bf376f750a71f334f69d76af5deca7ecc5d')
# Prefer latest long term release
version('3.16.5', sha256='525f469ad6e40dd7a8f09ebab5eb6a2dffc45939b99b7d937750cc04ed78d61c', preferred=True)
version('3.14.16', sha256='c9915c2e577f1812a2b35b678b123c58407e07824d73e5ec0dda13db7ca75c04')
version('3.14.0', sha256='1b76c5278def0c447c3d354149a2afe2562ac26cf0bcbe69b9e0528356d407b8')
version('3.12.3', sha256='c2b53815f9b994e1662995d1f25f90628156b996758f5471bffb74ab29a95220')
version('3.12.2', sha256='501f81715672205afd2c1a289ffc765aff96eaa8ecb49d079a58ef4d907467b8')
version('3.12.1', sha256='a7dc7af768b8960c08ce72a06c1f4ca4664f4197ce29c7fe238429e48b2881a8')
version('3.12.0', sha256='19e9c185dfe88cad7ee6e0dcf5ab7b0bbfe1672307868a53bf771e0c8f9d5e9c')
version('3.10.10', sha256='e21a778139823fb6cf12e4a38f00984fcc060f41abcd4f0af83642d566883839')
version('3.10.7', sha256='f6c02489e065bae355d2f4374b84a1624379634c34a770b6d65bf38eb7e71564')
version('3.10.6', sha256='a96791bf6615e4f8ecdbbb9a90a8ef14a12459d8c5c374ab22eb5f776f864bb5')
version('3.10.5', sha256='f3e1cc362941ec69cc21062eeaea160354ef71382b21dc4b3191c315447b4ce1')
version('3.10.4', sha256='a032e2b8144c2fd825bc26766f586cfb1bd8574bc72efd1aa8ce18dfff8b6c9f')
version('3.10.3', sha256='0869704df9120dd642996ff1ed50213ac8247650aa0640b62f8c9c581c05d7a7')
version('3.10.2', sha256='381cb01a8ac2f5379a915b124e9c830d727d2c67775ec49609c7153fe765a6f7')
version('3.10.1', sha256='466ac9fad91f266cf3b9d148f58e2adebd5b9fcfc03e6730eb72251e6c34c8ab')
version('3.10.0', sha256='25eb1c41d9fb922ffa337a720dfdceee43cf2d38409923f087c2010c9742f012')
version('3.8.3', sha256='3cca3e8483bc158cb8e972eb819a55a5734ba70f2c7da28ebc485864aafb17bd')
version('3.8.2', sha256='4d682f7625465a5b3596b3f7e83eddad86a60384fead9c81a6870704baffaddd')
version('3.8.1', sha256='d65c8e1c7471bba46f5017f261ebbef81dffb5843a24f0e7713a00f70785ea99')
version('3.4.15', sha256='81c93b72adbea41bd765294c0cdb09476a632d8b3f90101abc409ca9ea7fb04d')
version('3.4.14', sha256='e138716c7ea84011d3b28fb9c75e6a79322fb66f532246393571906a595d7261')
variant('3d', default=False, description='Build QGIS 3D library')
variant('analysis', default=True, description='Build QGIS analysis library')
variant('apidoc', default=False, description='Build QGIS API doxygen documentation')
variant('astyle', default=False, description='Contribute QGIS with astyle')
variant('bindings', default=True, description='Build Python bindings')
variant('clang_tidy', default=False, description='Use Clang tidy')
variant('core', default=True, description='Build QGIS Core')
variant('custom_widgets', default=False, description='Build QGIS custom widgets for Qt Designer')
variant('desktop', default=True, description='Build QGIS desktop')
variant('georeferencer', default=True, description='Build GeoReferencer plugin')
variant('globe', default=False, description='Build Globe plugin')
variant('grass7', default=False, description='Build with GRASS providers and plugin')
variant('gui', default=True, description='Build QGIS GUI library and everything built on top of it')
    variant('internal_mdal', default=True, description='Build with MDAL support')
variant('internal_o2', default=True, description='Download and locally include source of o2 library')
variant('oauth2_plugin', default=True, description='Build OAuth2 authentication method plugin')
variant('oracle', default=False, description='Build with Oracle support')
    variant('postgresql', default=True, description='Build with PostgreSQL support')
variant('py_compile', default=False, description='Byte compile Python modules in staged or installed locations')
variant('qsciapi', default=True, description='Generate PyQGIS QScintilla2 API')
variant('qspatialite', default=False, description='Build QSpatialite sql driver')
variant('qt5serialport', default=True, description='Try Qt5SerialPort for GPS positioning')
variant('qtmobility', default=False, description='Build QtMobility related code')
variant('qtwebkit', default=False, description='Enable QtWebkit Support')
variant('quick', default=False, description='Build QGIS Quick library')
variant('qwtpolar', default=False, description='Build QwtPolar')
variant('server', default=False, description='Build QGIS server')
variant('staged_plugins', default=True, description='Stage-install core Python plugins to run from build directory')
variant('thread_local', default=True, description='Use std::thread_local')
variant('txt2tags', default=False, description='Generate PDF for txt2tags documentation')
# Ref. for dependencies:
# https://github.com/qgis/QGIS/blob/master/INSTALL.md
depends_on('exiv2')
depends_on('[email protected]:')
depends_on('[email protected]: +python', type=('build', 'link', 'run'))
depends_on('[email protected]:')
depends_on('libspatialindex')
depends_on('[email protected]:')
depends_on('libzip')
depends_on('libtasn1')
depends_on('[email protected]:')
depends_on('py-psycopg2', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('py-pyqt4', when='@2')
depends_on('[email protected]:', when='@3')
depends_on('py-requests', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('[email protected]:2.8', type=('build', 'run'), when='@2')
depends_on('[email protected]:', type=('build', 'run'), when='@3')
depends_on('[email protected]')
depends_on('qjson')
depends_on('qscintilla +python')
depends_on('qt+dbus')
depends_on('[email protected]:', when='@3:')
depends_on('qwt@5:')
depends_on('qwtpolar')
depends_on('[email protected]: +column_metadata')
depends_on('protobuf', when='@3.16.4:')
# Runtime python dependencies, not mentioned in install instructions
depends_on('py-pyyaml', type='run')
depends_on('py-owslib', type='run')
depends_on('py-jinja2', type='run')
depends_on('py-pygments', type='run')
# optionals
depends_on('postgresql@8:', when='+postgresql') # for PostGIS support
depends_on('gsl', when='+georeferencer') # for georeferencer
# [email protected] is the first version that supports proj@6
    depends_on('grass@7:', type=('build', 'link', 'run'), when='+grass7')  # for GRASS providers and plugin
# The below dependencies are shown in cmake config
# hdf5 and netcdf-c together run afoul of a concretizer bug.
# netcdf-c already depends on hdf5
# depends_on('hdf5').
depends_on('netcdf-c')
# build
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build')
depends_on('pkgconfig', type='build')
# Take care of conflicts using depends_on
depends_on('[email protected]:5.12.99', when='@3.8')
depends_on('[email protected]:', when='@3.10.0:')
depends_on('qtkeychain@:1.5.99', when='^qt@4')
depends_on('qt@:4', when='@2')
# Help concretizer
# +qsci_api is implied by qscintilla+python dependency
depends_on('py-pyqt4 +qsci_api', when='@2')
depends_on('[email protected]: +qsci_api', when='@3')
patch('pyqt5.patch', when='@:3.14 ^qt@5')
patch('pyqt5_3165x.patch', when='@3.16.5: ^qt@5')
def cmake_args(self):
spec = self.spec
args = []
# qtwebkit module was removed from qt as of version 5.6
# needs to be compiled as a separate package
args.extend([
'-DUSE_OPENCL=OFF',
# cmake couldn't determine the following paths
'-DEXPAT_LIBRARY={0}'.format(self.spec['expat'].libs),
'-DPOSTGRESQL_PREFIX={0}'.format(
self.spec['postgresql'].prefix),
'-DQSCINTILLA_INCLUDE_DIR=' +
self.spec['qscintilla'].prefix.include,
'-DQSCINTILLA_LIBRARY=' + self.spec['qscintilla'].prefix +
'/lib/libqscintilla2_qt5.so',
'-DLIBZIP_INCLUDE_DIR=' +
self.spec['libzip'].prefix.include,
'-DLIBZIP_CONF_INCLUDE_DIR=' +
self.spec['libzip'].prefix.lib.libzip.include,
'-DGDAL_CONFIG_PREFER_PATH=' +
self.spec['gdal'].prefix.bin,
'-DGEOS_CONFIG_PREFER_PATH=' +
self.spec['geos'].prefix.bin,
'-DGSL_CONFIG_PREFER_PATH=' + self.spec['gsl'].prefix.bin,
'-DPOSTGRES_CONFIG_PREFER_PATH=' +
self.spec['postgresql'].prefix.bin
])
args.extend([
'-DWITH_3D={0}'.format(
'TRUE' if '+3d' in spec else 'FALSE'),
'-DWITH_ANALYSIS={0}'.format(
'TRUE' if '+analysis' in spec else 'FALSE'),
'-DWITH_APIDOC={0}'.format(
'TRUE' if '+apidoc' in spec else 'FALSE'),
'-DWITH_ASTYLE={0}'.format(
'TRUE' if '+astyle' in spec else 'FALSE'),
'-DWITH_BINDINGS={0}'.format(
'TRUE' if '+bindings' in spec else 'FALSE'),
'-DWITH_CLANG_TIDY={0}'.format(
'TRUE' if '+clang_tidy' in spec else 'FALSE'),
'-DWITH_CORE={0}'.format(
'TRUE' if '+core' in spec else 'FALSE'),
'-DWITH_CUSTOM_WIDGETS={0}'.format(
'TRUE' if '+custom_widgets' in spec else 'FALSE'),
'-DWITH_DESKTOP={0}'.format(
'TRUE' if '+desktop' in spec else 'FALSE'),
'-DWITH_GEOREFERENCER={0}'.format(
'TRUE' if '+georeferencer' in spec else 'FALSE'),
'-DWITH_GLOBE={0}'.format(
'TRUE' if '+globe' in spec else 'FALSE'),
'-DWITH_GUI={0}'.format(
'TRUE' if '+gui' in spec else 'FALSE'),
'-DWITH_INTERNAL_MDAL={0}'.format(
'TRUE' if '+internal_mdal' in spec else 'FALSE'),
'-DWITH_INTERNAL_O2={0}'.format(
'ON' if '+internal_o2' in spec else 'OFF'),
'-DWITH_OAUTH2_PLUGIN={0}'.format(
'TRUE' if '+oauth2_plugin' in spec else 'FALSE'),
'-DWITH_ORACLE={0}'.format(
'TRUE' if '+oracle' in spec else 'FALSE'),
'-DWITH_POSTGRESQL={0}'.format(
'TRUE' if '+postgresql' in spec else 'FALSE'),
'-DWITH_PY_COMPILE={0}'.format(
'TRUE' if '+py_compile' in spec else 'FALSE'),
'-DWITH_QSCIAPI={0}'.format(
'TRUE' if '+qsciapi' in spec else 'FALSE'),
'-DWITH_QSPATIALITE={0}'.format(
'ON' if '+qspatialite' in spec else 'OFF'),
'-DWITH_QT5SERIALPORT={0}'.format(
'TRUE' if '+qt5serialport' in spec else 'FALSE'),
'-DWITH_QTMOBILITY={0}'.format(
'TRUE' if '+qtmobility' in spec else 'FALSE'),
'-DWITH_QTWEBKIT={0}'.format(
'ON' if '+qtwebkit' in spec else 'OFF'),
'-DWITH_QUICK={0}'.format(
'TRUE' if '+quick' in spec else 'FALSE'),
'-DWITH_QWTPOLAR={0}'.format(
'TRUE' if '+qwtpolar' in spec else 'FALSE'),
'-DWITH_SERVER={0}'.format(
'TRUE' if '+server' in spec else 'FALSE'),
'-DWITH_STAGED_PLUGINS={0}'.format(
'TRUE' if '+staged_plugins' in spec else 'FALSE'),
'-DWITH_THREAD_LOCAL={0}'.format(
'TRUE' if '+thread_local' in spec else 'FALSE'),
'-DWITH_TXT2TAGS_PDF={0}'.format(
                'TRUE' if '+txt2tags' in spec else 'FALSE'),
])
if '+grass7' in self.spec:
args.extend([
'-DWITH_GRASS7=ON',
'-DGRASS_PREFIX7={0}'.format(self.spec['grass'].prefix),
'-DGRASS_INCLUDE_DIR7={0}'.format(
self.spec['grass'].prefix.include)
])
else:
args.append('-DWITH_GRASS7=OFF')
return args
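    # Example spec (hypothetical): `spack install qgis@3.16.5 +grass7 ~apidoc`
    # toggles the corresponding WITH_* CMake flags assembled above.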
| 54.613445 | 124 | 0.625789 |
fa60ce09d8663522326bcd3580d3946b43015d4c
| 4,610 |
py
|
Python
|
nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py
|
hanke/nipype
|
71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py
|
hanke/nipype
|
71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py
|
hanke/nipype
|
71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class N4ITKBiasFieldCorrectionInputSpec(CommandLineInputSpec):
inputimage = File(desc="Input image where you observe signal inhomegeneity", exists=True, argstr="--inputimage %s")
maskimage = File(desc="Binary mask that defines the structure of your interest. NOTE: This parameter is OPTIONAL. If the mask is not specified, the module will use internally Otsu thresholding to define this mask. Better processing results can often be obtained when a meaningful mask is defined.", exists=True, argstr="--maskimage %s")
outputimage = traits.Either(traits.Bool, File(), hash_files=False, desc="Result of processing", argstr="--outputimage %s")
outputbiasfield = traits.Either(traits.Bool, File(), hash_files=False, desc="Recovered bias field (OPTIONAL)", argstr="--outputbiasfield %s")
iterations = InputMultiPath(traits.Int, desc="Maximum number of iterations at each level of resolution. Larger values will increase execution time, but may lead to better results.", sep=",", argstr="--iterations %s")
convergencethreshold = traits.Float(desc="Stopping criterion for the iterative bias estimation. Larger values will lead to smaller execution time.", argstr="--convergencethreshold %f")
meshresolution = InputMultiPath(traits.Float, desc="Resolution of the initial bspline grid defined as a sequence of three numbers. The actual resolution will be defined by adding the bspline order (default is 3) to the resolution in each dimension specified here. For example, 1,1,1 will result in a 4x4x4 grid of control points. This parameter may need to be adjusted based on your input image. In the multi-resolution N4 framework, the resolution of the bspline grid at subsequent iterations will be doubled. The number of resolutions is implicitly defined by Number of iterations parameter (the size of this list is the number of resolutions)", sep=",", argstr="--meshresolution %s")
splinedistance = traits.Float(desc="An alternative means to define the spline grid, by setting the distance between the control points. This parameter is used only if the grid resolution is not specified.", argstr="--splinedistance %f")
shrinkfactor = traits.Int(desc="Defines how much the image should be upsampled before estimating the inhomogeneity field. Increase if you want to reduce the execution time. 1 corresponds to the original resolution. Larger values will significantly reduce the computation time.", argstr="--shrinkfactor %d")
bsplineorder = traits.Int(desc="Order of B-spline used in the approximation. Larger values will lead to longer execution times, may result in overfitting and poor result.", argstr="--bsplineorder %d")
weightimage = File(desc="Weight Image", exists=True, argstr="--weightimage %s")
histogramsharpening = InputMultiPath(traits.Float, desc="A vector of up to three values. Non-zero values correspond to Bias Field Full Width at Half Maximum, Wiener filter noise, and Number of histogram bins.", sep=",", argstr="--histogramsharpening %s")
class N4ITKBiasFieldCorrectionOutputSpec(TraitedSpec):
outputimage = File(desc="Result of processing", exists=True)
outputbiasfield = File(desc="Recovered bias field (OPTIONAL)", exists=True)
class N4ITKBiasFieldCorrection(SEMLikeCommandLine):
"""title: N4ITK MRI Bias correction
category: Filtering
description: Performs image bias correction using N4 algorithm. This module is based on the ITK filters contributed in the following publication: Tustison N, Gee J "N4ITK: Nick's N3 ITK Implementation For MRI Bias Field Correction", The Insight Journal 2009 January-June, http://hdl.handle.net/10380/3053
version: 9
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/N4ITKBiasFieldCorrection
contributor: Nick Tustison (UPenn), Andrey Fedorov (SPL, BWH), Ron Kikinis (SPL, BWH)
acknowledgements: The development of this module was partially supported by NIH grants R01 AA016748-01, R01 CA111288 and U01 CA151261 as well as by NA-MIC, NAC, NCIGT and the Slicer community.
"""
input_spec = N4ITKBiasFieldCorrectionInputSpec
output_spec = N4ITKBiasFieldCorrectionOutputSpec
_cmd = "N4ITKBiasFieldCorrection "
_outputs_filenames = {'outputimage':'outputimage.nii','outputbiasfield':'outputbiasfield.nii'}
| 92.2 | 690 | 0.780043 |
f89d316ac52e44259382a852ae187900d91d3d4f
| 3,074 |
py
|
Python
|
harvey/webhooks.py
|
gurneesh/harvey
|
393308bfc2a833ddbbfe7aca4ddf157a7593aa73
|
[
"MIT"
] | null | null | null |
harvey/webhooks.py
|
gurneesh/harvey
|
393308bfc2a833ddbbfe7aca4ddf157a7593aa73
|
[
"MIT"
] | null | null | null |
harvey/webhooks.py
|
gurneesh/harvey
|
393308bfc2a833ddbbfe7aca4ddf157a7593aa73
|
[
"MIT"
] | null | null | null |
import hashlib
import hmac
import os
from threading import Thread
from harvey.globals import Global
from harvey.pipelines import Pipeline
WEBHOOK_SECRET = os.getenv('WEBHOOK_SECRET')
class Webhook:
@staticmethod
def parse_webhook(request, use_compose):
"""Parse a webhook's data. Return success or error status.
1. Check if the request came from a GitHub webhook (optional)
2. Check if the payload is valid JSON
        3. Check if the branch is in the allowed set of branches to run a pipeline from
4. Check if the webhook secret matches (optional)
"""
# TODO: Restructure this function so it can be used for more than starting a pipeline
success = False
message = 'Server-side error.'
status_code = 500
payload_data = request.data
payload_json = request.json
payload_ip_address = request.remote_addr
signature = request.headers.get('X-Hub-Signature')
if Global.FILTER_WEBHOOKS and payload_ip_address not in Global.github_webhook_ip_ranges():
message = 'Webhook did not originate from GitHub.'
status_code = 422
elif payload_data and payload_json:
if Global.APP_MODE != 'test' and not Webhook.decode_webhook(payload_data, signature):
message = 'The X-Hub-Signature did not match the WEBHOOK_SECRET.'
status_code = 403
# TODO: Allow the user to configure whatever branch they'd like to pull from or
# a list of branches that can be pulled from
elif payload_json['ref'] in Global.ALLOWED_BRANCHES:
# TODO: It appears that you must provide a secret, add an option for those that
# don't want to use a secret
if Global.APP_MODE == 'test' or Webhook.decode_webhook(payload_data, signature):
Thread(
target=Pipeline.start_pipeline,
args=(
payload_json,
use_compose,
),
).start()
message = f'Started pipeline for {payload_json["repository"]["name"]}'
status_code = 200
success = True
else:
message = 'Harvey can only pull from the "master" or "main" branch of a repo.'
status_code = 422
else:
message = 'Malformed or missing JSON data in webhook.'
status_code = 422
response = {
'success': success,
'message': message,
}, status_code
return response
@staticmethod
def decode_webhook(data, signature):
"""Decode a webhook's secret key"""
if signature:
secret = bytes(WEBHOOK_SECRET, 'UTF-8')
mac = hmac.new(secret, msg=data, digestmod=hashlib.sha1)
digest = 'sha1=' + mac.hexdigest()
return hmac.compare_digest(digest, signature)
else:
return False
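    # Sketch of how a matching signature is produced on the sender's side
    # (secret and payload are hypothetical):
    #   mac = hmac.new(b'my-secret', msg=b'{"ref": "refs/heads/main"}', digestmod=hashlib.sha1)
    #   signature = 'sha1=' + mac.hexdigest()  # the value GitHub sends in X-Hub-Signature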
| 39.410256 | 98 | 0.590111 |
01344a994cfcb84b775067b453c1bff3aef617ae
| 2,372 |
py
|
Python
|
Samples/london-no2.py
|
FutureCitiesCatapult/digital-connector-python
|
87893f331ecdebe206530455b3bdae9538e56ba6
|
[
"MIT"
] | 7 |
2018-01-05T17:31:15.000Z
|
2018-05-10T13:40:36.000Z
|
Samples/london-no2.py
|
FutureCitiesCatapult/digital-connector-python
|
87893f331ecdebe206530455b3bdae9538e56ba6
|
[
"MIT"
] | 28 |
2018-01-08T10:43:03.000Z
|
2018-05-11T11:04:56.000Z
|
Samples/london-no2.py
|
FutureCitiesCatapult/digital-connector-python
|
87893f331ecdebe206530455b3bdae9538e56ba6
|
[
"MIT"
] | 3 |
2018-02-22T16:20:06.000Z
|
2018-03-17T10:05:08.000Z
|
'''
This recipe will get you started with the Digital Connector;
it uses the minimal objects required to build a useful recipe.
The recipe aims to give the latest value of the 'NO2 40 ug/m3 annual mean'
attribute for every LocalAuthority.
To know more Digital Connector visit https://github.com/FutureCitiesCatapult/TomboloDigitalConnector/blob/master/documentation/README.md
and to know more about its entities like Subject, Attribute, Datasources,
please visit https://github.com/FutureCitiesCatapult/TomboloDigitalConnector/blob/master/documentation/recipe-language.md
'''
from os import path, pardir
import sys
sys.path.append(path.join(path.dirname(path.realpath(__file__)), pardir))
# Declaring the path of Digital Connector
# Declaring the path of the generated recipe, which saves the recipe in JSON format
# before sending it to DC.
# Declaring the location of the output file
# Note: all three declaration should be relative to user's home directory
tombolo_path = 'Desktop/TomboloDigitalConnector'
recipe_output_location = 'Desktop/london-no2.json'
model_output = 'Desktop/london-no2.geojson'
# importing the required classes for building the recipe
from recipe import Recipe, Field, Datasource, AttributeMatcher, Subject, Match_Rule, LatestValueField, Dataset
# Creating Subject and Datasources to tell DC which importer needs to be called
# in order to download datasets. For more info, please refer to the documentation
subjects = Subject(subject_type_label='airQualityControl', provider_label='erg.kcl.ac.uk')
datasources = Datasource(importer_class='uk.org.tombolo.importer.lac.LAQNImporter', datasource_id='airQualityControl')
# Creating Attribute to tell DC what exactly does it needs to search in the database
# Creating LatestValueField in order to get the Latest Record saved for NO2 40 mg for that year
# and passing it an object of Attribute matcher
attribute_matcher = AttributeMatcher(provider='erg.kcl.ac.uk', label='NO2 40 ug/m3 as an annual mean')
lvf = LatestValueField(attribute_matcher=attribute_matcher, label='Anual NO2')
# Passing everything to a Dataset Object as a list and building and running the recipe
# in one single step
dataset = Dataset(subjects=[subjects], datasources=[datasources], fields=[lvf])
dataset.build_and_run(tombolo_path=tombolo_path, model_output_location=model_output,
recipe_console_print=True)
| 52.711111 | 137 | 0.801012 |
b03ca3c30f58633f9dacac7c4901c531576e0107
| 1,918 |
py
|
Python
|
setup.py
|
EvaSDK/django-pipeline
|
b6b16b94aa713806f8bc3a7bff60dbf1a2325af5
|
[
"MIT"
] | null | null | null |
setup.py
|
EvaSDK/django-pipeline
|
b6b16b94aa713806f8bc3a7bff60dbf1a2325af5
|
[
"MIT"
] | null | null | null |
setup.py
|
EvaSDK/django-pipeline
|
b6b16b94aa713806f8bc3a7bff60dbf1a2325af5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import io
from setuptools import setup, find_packages
import sys
setup(
name='django-pipeline',
version='1.6.13.post1',
description='Pipeline is an asset packaging library for Django.',
long_description=io.open('README.rst', encoding='utf-8').read() + '\n\n' +
io.open('HISTORY.rst', encoding='utf-8').read(),
author='Timothée Peignier',
author_email='[email protected]',
url='https://github.com/jazzband/django-pipeline',
license='MIT',
packages=find_packages(exclude=['tests', 'tests.tests']),
zip_safe=False,
install_requires=[],
extras_require={
':python_version<"3.2"': ['futures>=2.1.3'],
},
include_package_data=True,
keywords=('django pipeline asset compiling concatenation compression'
' packaging'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| 36.188679 | 78 | 0.593848 |
f8d5a1c8c966feb9f232f9de4066644e79f4a3f8
| 3,012 |
py
|
Python
|
nvidia-efa-ami_base/cloudwatch/nvidia/accel-to-cw.py
|
yuzhang66/aws-efa-nccl-baseami-pipeline
|
6af4993d0ebd17ce0475cf31f2c139c71c3b821c
|
[
"MIT-0"
] | 1 |
2021-07-11T00:31:12.000Z
|
2021-07-11T00:31:12.000Z
|
nvidia-efa-ami_base/cloudwatch/accel-to-cw.py
|
QPC-database/aws-efa-nccl-baseami-pipeline
|
bcc76645f7b51021eced19fc050a113c6e030007
|
[
"MIT-0"
] | null | null | null |
nvidia-efa-ami_base/cloudwatch/accel-to-cw.py
|
QPC-database/aws-efa-nccl-baseami-pipeline
|
bcc76645f7b51021eced19fc050a113c6e030007
|
[
"MIT-0"
] | null | null | null |
import boto3
import os
from datetime import datetime
from time import sleep
import sys
import subprocess
import json
import gzip
import urllib
BINARY_PATH=sys.argv[1]
#endpoint_url = "http://localhost:8000/"
### CHOOSE REGION ####
#EC2_REGION = 'us-east-1'
### CHOOSE NAMESPACE PARAMETERS HERE ####
my_NameSpace = 'AcceleratorMetrics'
### CHOOSE PUSH INTERVAL ####
sleep_interval = 10
### CHOOSE STORAGE RESOLUTION (BETWEEN 1-60) ####
store_reso = 1
### INSTANCE INFO
BASE_URL = 'http://169.254.169.254/latest/meta-data/'
INSTANCE_ID = urllib.request.urlopen(BASE_URL + 'instance-id').read().decode('utf8')
INSTANCE_TYPE = urllib.request.urlopen(BASE_URL + 'instance-type').read().decode('utf8')
INSTANCE_AZ = urllib.request.urlopen(BASE_URL + 'placement/availability-zone').read().decode('utf8')
print(INSTANCE_AZ)
EC2_REGION = INSTANCE_AZ[:-1]
cloudwatch = boto3.client('cloudwatch', region_name=EC2_REGION)
#event_system = cloudwatch.meta.events
PUSH_TO_CW = True
def create_metric_shard(i,d,n,m):
metric_shard=[]
MY_DIMENSIONS=[
{
'Name': 'Id',
'Value': INSTANCE_ID
},
{
'Name': 'InstanceType',
'Value': INSTANCE_TYPE
},
{
'Name': 'AcceleratorIndex',
'Value': str(i)
},
{
'Name': 'AcceleratorName',
'Value': str(n)
},
{
'Name': 'AcceleratorDriver',
'Value': str(d)
}
]
for key, value in m.items():
a={'MetricName':key,'Dimensions':MY_DIMENSIONS,'Unit':'None','StorageResolution': store_reso,'Value':int(value)}
metric_shard.append(a)
return metric_shard
def gzip_request_body(request, **kwargs):
gzipped_body = gzip.compress(request.body)
request.headers.add_header('Content-Encoding', 'gzip')
request.data = gzipped_body
def logResults(metric_shard):
if (PUSH_TO_CW):
# event_system.register('before-sign.cloudwatch.PutMetricData', gzip_request_body)
cloudwatch.put_metric_data(
Namespace=my_NameSpace,
MetricData=metric_shard
)
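# Each line emitted by BINARY_PATH is expected to be a JSON document shaped like
# the following (metric names and values are illustrative only):
#   {"Gpu_index": 0, "Driver": "450.80.02", "Gpu_name": "A100-SXM4-40GB",
#    "Metrics": {"gpu_utilization": 87, "memory_utilization": 54}}
# main() parses one such line per accelerator and ships a metric shard for each.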
def main():
while True:
PUSH_TO_CW = True
accel_metric_list=[]
accel_metric_shard=subprocess.check_output(BINARY_PATH, universal_newlines=True)
accel_metric_list=accel_metric_shard.splitlines()
for accel in range(len(accel_metric_list)):
d=json.loads(accel_metric_list[accel])
ametric_shard=create_metric_shard(d['Gpu_index'],d['Driver'],d['Gpu_name'],d['Metrics'])
print(ametric_shard)
logResults(ametric_shard)
sleep(sleep_interval)
if __name__=='__main__':
main()
| 30.734694 | 120 | 0.581673 |
6efc19be38345ee2d142c3df56d9dd98da9e789e
| 4,217 |
py
|
Python
|
build/env/lib/python2.7/site-packages/windmill-1.3-py2.7.egg/windmill/conf/__init__.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | 11 |
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
build/env/lib/python2.6/site-packages/windmill-1.3-py2.6.egg/windmill/conf/__init__.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | null | null | null |
build/env/lib/python2.6/site-packages/windmill-1.3-py2.6.egg/windmill/conf/__init__.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | 5 |
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import local_settings
def _load(globalName='global_settings.py', localSettings=None):
"""
Helper function to load and return globalSettings and localSettings
Used to allow the merge routine to be called from a test setup.
"""
from windmill.conf import global_settings as globalSettings
if localSettings is not None:
localPath = os.path.dirname(localSettings)
localFile = os.path.basename(localSettings)
localSettings = local_settings.loadSettings(dirname=localPath, filename=localFile)
else:
localSettings = local_settings.loadSettings()
return globalSettings, localSettings
def mergeSettings(windmillSettings, globalSettings, localSettings):
"""
Merge the global and local settings with the dictionary of windmill defaults.
globalSettings and localSettings can be dictionaries but normally are
settings modules that have been loaded into memory.
"""
def _get(item, isDict, key):
if isDict:
return item[key]
else:
return getattr(item, key)
globalKeys = {}
localKeys = {}
globalDict = type(globalSettings) is dict
localDict = type(localSettings) is dict
if globalDict:
keylist = globalSettings.keys()
else:
keylist = dir(globalSettings)
for key in keylist:
if not key.startswith('__'):
globalKeys[key.upper()] = key
if localDict:
keylist = localSettings.keys()
else:
keylist = dir(localSettings)
for key in keylist:
if not key.startswith('__'):
localKeys[key.upper()] = key
for key in windmillSettings:
key_upper = key.upper()
if key_upper in localKeys:
windmillSettings[key] = _get(localSettings, localDict, localKeys[key_upper])
del localKeys[key_upper]
elif key_upper in globalKeys:
windmillSettings[key] = _get(globalSettings, globalDict, globalKeys[key_upper])
del globalKeys[key_upper]
for key_upper in localKeys:
key = localKeys[key_upper]
windmillSettings[key] = _get(localSettings, localDict, key)
if key_upper in globalKeys:
del globalKeys[key_upper]
for key_upper in globalKeys:
key = globalKeys[key_upper]
windmillSettings[key] = _get(globalSettings, globalDict, key)
def configure_settings(localSettings=None, windmill_settings={}):
"""
Override global settings with any locals and configure the windmill_settings dict.
"""
globalSettings, localSettings = _load(localSettings=localSettings)
mergeSettings(windmill_settings, globalSettings, localSettings)
return windmill_settings
if __name__ == '__main__':
import sys
if '--test' in sys.argv:
ls = { 'localonly': 1, 'all': 1, 'localglobal': 1}
gs = { 'globalonly': 2, 'all': 2, 'localglobal': 2}
ws = { 'wsonly': 3, 'all': 3}
cv = { 'localonly': 1, 'globalonly': 2, 'localglobal': 1, 'all': 2, 'wsonly': 3}
print 'windmill', ws
print 'global ', gs
print 'local ', ls
mergeSettings(ws, gs, ls)
print 'should be: ', cv
print 'actually is:', ws
for key in ws:
if key not in cv:
print 'key %s found in ws but not in cv' % key
else:
if ws[key] <> cv[key]:
print 'key %s differs: is %d but should be %d' % (key, ws[key], cv[key])
| 31.237037 | 92 | 0.648802 |
7d70fe2e3586a27e1c256a98cb527e2851ce5b1d
| 2,407 |
py
|
Python
|
src/pipx/constants.py
|
mindywallen699/pipx
|
5c3cc823ab780f4d562e5e8c1ed0c31aee893133
|
[
"MIT"
] | null | null | null |
src/pipx/constants.py
|
mindywallen699/pipx
|
5c3cc823ab780f4d562e5e8c1ed0c31aee893133
|
[
"MIT"
] | null | null | null |
src/pipx/constants.py
|
mindywallen699/pipx
|
5c3cc823ab780f4d562e5e8c1ed0c31aee893133
|
[
"MIT"
] | null | null | null |
import os
import sys
from pathlib import Path
from textwrap import dedent
from typing import NewType
DEFAULT_PIPX_HOME = Path.home() / ".local/pipx"
DEFAULT_PIPX_BIN_DIR = Path.home() / ".local/bin"
PIPX_HOME = Path(os.environ.get("PIPX_HOME", DEFAULT_PIPX_HOME)).resolve()
PIPX_LOCAL_VENVS = PIPX_HOME / "venvs"
PIPX_LOG_DIR = PIPX_HOME / "logs"
DEFAULT_PIPX_SHARED_LIBS = PIPX_HOME / "shared"
PIPX_SHARED_LIBS = Path(
os.environ.get("PIPX_SHARED_LIBS", DEFAULT_PIPX_SHARED_LIBS)
).resolve()
PIPX_SHARED_PTH = "pipx_shared.pth"
LOCAL_BIN_DIR = Path(os.environ.get("PIPX_BIN_DIR", DEFAULT_PIPX_BIN_DIR)).resolve()
PIPX_VENV_CACHEDIR = PIPX_HOME / ".cache"
TEMP_VENV_EXPIRATION_THRESHOLD_DAYS = 14
ExitCode = NewType("ExitCode", int)
# pipx shell exit codes
EXIT_CODE_OK = ExitCode(0)
EXIT_CODE_INJECT_ERROR = ExitCode(1)
EXIT_CODE_INSTALL_VENV_EXISTS = ExitCode(1)
EXIT_CODE_LIST_PROBLEM = ExitCode(1)
EXIT_CODE_UNINSTALL_VENV_NONEXISTENT = ExitCode(1)
EXIT_CODE_UNINSTALL_ERROR = ExitCode(1)
EXIT_CODE_REINSTALL_VENV_NONEXISTENT = ExitCode(1)
def is_windows() -> bool:
try:
WindowsError # noqa
except NameError:
return False
else:
return True
WINDOWS: bool = is_windows()
def strtobool(val: str) -> bool:
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return True
elif val in ("n", "no", "f", "false", "off", "0"):
return False
else:
return False
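# e.g. strtobool("Yes") -> True, strtobool("0") -> False; unlike distutils'
# strtobool, unrecognised values fall back to False here instead of raising.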
def use_emojis() -> bool:
    platform_emoji_support = not is_windows() and sys.getdefaultencoding() == "utf-8"
    return strtobool(str(os.getenv("USE_EMOJI", platform_emoji_support)))
emoji_support = use_emojis()
completion_instructions = dedent(
"""
Add the appropriate command to your shell's config file
so that it is run on startup. You will likely have to restart
or re-login for the autocompletion to start working.
bash:
eval "$(register-python-argcomplete pipx)"
zsh:
To activate completions for zsh you need to have
bashcompinit enabled in zsh:
autoload -U bashcompinit
bashcompinit
Afterwards you can enable completion for pipx:
eval "$(register-python-argcomplete pipx)"
tcsh:
eval `register-python-argcomplete --shell tcsh pipx`
fish:
# Not required to be in the config file, only run once
register-python-argcomplete --shell fish pipx >~/.config/fish/completions/pipx.fish
"""
)
| 26.744444 | 87 | 0.723307 |
ab573a05eff0ad7f819e5622cdb8d8c4df7f49cd
| 2,410 |
py
|
Python
|
tests/test_json.py
|
jdewells/jschon
|
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
jdewells/jschon
|
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
jdewells/jschon
|
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
|
[
"MIT"
] | null | null | null |
import json as jsonlib
import tempfile
from decimal import Decimal
from typing import Optional
import pytest
from hypothesis import given
from jschon import JSON, JSONPointer
from jschon.json import AnyJSONCompatible
from tests.strategies import json, json_nodecimal
from tests.test_jsonpointer import jsonpointer_escape
def assert_json_node(
inst: JSON,
val: AnyJSONCompatible,
parent: Optional[JSON],
key: Optional[str],
ptr: str,
):
assert inst.value == (Decimal(f'{val}') if isinstance(val, float) else val)
assert inst.parent == parent
assert inst.key == key
assert inst.path == JSONPointer(ptr)
if val is None:
assert inst.type == "null"
elif isinstance(val, bool):
assert inst.type == "boolean"
elif isinstance(val, (int, float, Decimal)):
assert inst.type == "number"
elif isinstance(val, str):
assert inst.type == "string"
elif isinstance(val, list):
assert inst.type == "array"
for i, el in enumerate(val):
assert_json_node(inst[i], el, inst, str(i), f'{inst.path}/{i}')
elif isinstance(val, dict):
assert inst.type == "object"
for k, v in val.items():
assert_json_node(inst[k], v, inst, k, f'{inst.path}/{jsonpointer_escape(k)}')
else:
assert False
assert bool(inst) == bool(val)
if isinstance(val, (str, list, dict)):
assert len(inst) == len(val)
else:
with pytest.raises(TypeError):
len(inst)
if not isinstance(val, (list, dict)):
with pytest.raises(TypeError):
iter(inst)
if not isinstance(val, list):
with pytest.raises(TypeError):
_ = inst[0]
if not isinstance(val, dict):
with pytest.raises(TypeError):
_ = inst['']
@given(json)
def test_create_json(value):
instance = JSON(value)
assert_json_node(instance, value, None, None, '')
@given(json_nodecimal)
def test_load_json_from_string(value):
s = jsonlib.dumps(value)
instance = JSON.loads(s)
assert_json_node(instance, value, None, None, '')
@given(json_nodecimal)
def test_load_json_from_file(value):
s = jsonlib.dumps(value)
with tempfile.NamedTemporaryFile() as f:
f.write(s.encode())
f.flush()
instance = JSON.loadf(f.name)
assert_json_node(instance, value, None, None, '')
| 27.386364 | 89 | 0.63527 |
2d9cf8636069d5310b0bc7f0ede150e4c49a4737
| 6,321 |
py
|
Python
|
src/olympia/conf/prod/settings.py
|
jpetto/olympia
|
f4e9badac9634657068dfbd4733ab5d17798e3f6
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/conf/prod/settings.py
|
jpetto/olympia
|
f4e9badac9634657068dfbd4733ab5d17798e3f6
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/conf/prod/settings.py
|
jpetto/olympia
|
f4e9badac9634657068dfbd4733ab5d17798e3f6
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import os
import environ
import datetime
from olympia.lib.settings_base import * # noqa
environ.Env.read_env(env_file='/etc/olympia/settings.env')
env = environ.Env()
ENGAGE_ROBOTS = True
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
EMAIL_BLACKLIST = env.list('EMAIL_BLACKLIST')
SEND_REAL_EMAIL = True
ENV = env('ENV')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
ADMINS = ()
API_THROTTLE = False
REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY')
DOMAIN = env('DOMAIN', default='addons.mozilla.org')
CRONJOB_LOCK_PREFIX = DOMAIN
SERVER_EMAIL = '[email protected]'
SITE_URL = 'https://' + DOMAIN
SERVICES_URL = 'https://services.addons.mozilla.org'
STATIC_URL = 'https://addons.cdn.mozilla.net/static/'
MEDIA_URL = 'https://addons.cdn.mozilla.net/user-media/'
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
SYSLOG_TAG = "http_app_addons"
SYSLOG_TAG2 = "http_app_addons_timer"
SYSLOG_CSP = "http_app_addons_csp"
DATABASES = {}
DATABASES['default'] = env.db('DATABASES_DEFAULT_URL')
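# DATABASES_DEFAULT_URL is a standard database URL parsed by django-environ,
# e.g. (hypothetical) "mysql://user:password@db-host:3306/olympia".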
DATABASES['default']['ENGINE'] = 'mysql_pool'
# Run all views in a transaction (on master) unless they are decorated not to.
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['slave'] = env.db('DATABASES_SLAVE_URL')
# Do not open a transaction for every view on the slave DB.
DATABASES['slave']['ATOMIC_REQUESTS'] = False
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['sa_pool_key'] = 'slave'
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 300
}
SERVICES_DATABASE = env.db('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']
CACHE_PREFIX = 'olympia.%s' % ENV
KEY_PREFIX = CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['BACKEND'] = 'caching.backends.memcached.MemcachedCache'
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
SECRET_KEY = env('SECRET_KEY')
# Celery
BROKER_URL = env('BROKER_URL')
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
BROKER_CONNECTION_TIMEOUT = 0.5
NETAPP_STORAGE_ROOT = env(u'NETAPP_STORAGE_ROOT')
NETAPP_STORAGE = NETAPP_STORAGE_ROOT + u'/shared_storage'
GUARDED_ADDONS_PATH = NETAPP_STORAGE_ROOT + u'/guarded-addons'
MEDIA_ROOT = NETAPP_STORAGE + u'/uploads'
# Must be forced in settings because name => path can't be dynamically
# computed: reviewer_attachmentS VS reviewer_attachment.
# TODO: rename folder on file system.
# (One can also just rename the setting, but this will not be consistent
# with the naming scheme.)
REVIEWER_ATTACHMENTS_PATH = MEDIA_ROOT + '/reviewer_attachment'
HERA = []
LOG_LEVEL = logging.DEBUG
LOGGING['loggers'].update({
'adi.updatecountsfromfile': {'level': logging.INFO},
'amqp': {'level': logging.WARNING},
'raven': {'level': logging.WARNING},
'requests': {'level': logging.WARNING},
'z.addons': {'level': logging.INFO},
'z.task': {'level': logging.DEBUG},
'z.hera': {'level': logging.INFO},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = env('REDIS_BACKENDS_CACHE')
REDIS_BACKENDS = {
'cache': env('REDIS_BACKENDS_CACHE'),
'cache_slave': env('REDIS_BACKENDS_CACHE_SLAVE'),
'master': env('REDIS_BACKENDS_MASTER'),
'slave': env('REDIS_BACKENDS_SLAVE')
}
CACHE_MACHINE_USE_REDIS = True
# Old recaptcha V1
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY')
# New Recaptcha V2
NOBOT_RECAPTCHA_PUBLIC_KEY = env('NOBOT_RECAPTCHA_PUBLIC_KEY')
NOBOT_RECAPTCHA_PRIVATE_KEY = env('NOBOT_RECAPTCHA_PRIVATE_KEY')
TMP_PATH = os.path.join(NETAPP_STORAGE, u'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = NETAPP_STORAGE_ROOT + u'/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
RESPONSYS_ID = env('RESPONSYS_ID')
ES_TIMEOUT = 60
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
STATSD_HOST = env('STATSD_HOST')
STATSD_PREFIX = env('STATSD_PREFIX')
GRAPHITE_HOST = env('GRAPHITE_HOST')
GRAPHITE_PREFIX = env('GRAPHITE_PREFIX')
CEF_PRODUCT = STATSD_PREFIX
NEW_FEATURES = True
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
GOOGLE_ANALYTICS_CREDENTIALS = env.dict('GOOGLE_ANALYTICS_CREDENTIALS')
GOOGLE_ANALYTICS_CREDENTIALS['user_agent'] = None
GOOGLE_ANALYTICS_CREDENTIALS['token_expiry'] = datetime.datetime(2013, 1, 3, 1, 20, 16, 45465) # noqa
GOOGLE_API_CREDENTIALS = env('GOOGLE_API_CREDENTIALS')
GEOIP_URL = 'https://geo.services.mozilla.com'
AES_KEYS = env.dict('AES_KEYS')
# Signing
SIGNING_SERVER = env('SIGNING_SERVER')
PRELIMINARY_SIGNING_SERVER = env('PRELIMINARY_SIGNING_SERVER')
PAYPAL_APP_ID = env('PAYPAL_APP_ID')
PAYPAL_EMBEDDED_AUTH = {
'USER': env('PAYPAL_EMBEDDED_AUTH_USER'),
'PASSWORD': env('PAYPAL_EMBEDDED_AUTH_PASSWORD'),
'SIGNATURE': env('PAYPAL_EMBEDDED_AUTH_SIGNATURE'),
}
PAYPAL_CGI_AUTH = PAYPAL_EMBEDDED_AUTH
SENTRY_DSN = env('SENTRY_DSN')
GOOGLE_ANALYTICS_DOMAIN = 'addons.mozilla.org'
NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
FXA_CONFIG = {
'client_id': env('FXA_CLIENT_ID'),
'client_secret': env('FXA_CLIENT_SECRET'),
'content_host': 'https://accounts.firefox.com',
'oauth_host': 'https://oauth.accounts.firefox.com/v1',
'profile_host': 'https://profile.accounts.firefox.com/v1',
'redirect_url':
'https://addons.mozilla.org/api/v3/accounts/authorize/',
'scope': 'profile',
}
VALIDATOR_TIMEOUT = 360
ES_DEFAULT_NUM_SHARDS = 10
READ_ONLY = env.bool('READ_ONLY', default=False)
RAVEN_DSN = (
'https://[email protected]/78')
RAVEN_WHITELIST = ['addons.mozilla.org', 'addons.cdn.mozilla.net']
| 28.60181 | 102 | 0.746401 |
0e536b3dc51ef377e0aee111d7cee2012e1873e9
| 13,231 |
py
|
Python
|
easyquotation/jsl.py
|
tces1/easyquotation
|
63ab5011aa834a8f5b67991fe676e8b991e70952
|
[
"MIT"
] | null | null | null |
easyquotation/jsl.py
|
tces1/easyquotation
|
63ab5011aa834a8f5b67991fe676e8b991e70952
|
[
"MIT"
] | 1 |
2021-07-29T04:13:59.000Z
|
2021-07-29T04:13:59.000Z
|
easyquotation/jsl.py
|
tces1/easyquotation
|
63ab5011aa834a8f5b67991fe676e8b991e70952
|
[
"MIT"
] | null | null | null |
# coding:utf8
"""
Fetch data from Jisilu (jisilu.cn)
"""
import json
import time
import requests
class Jsl:
"""
    Scrape structured fund class-A data from Jisilu
"""
    # Endpoint for structured fund A shares
__funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}"
    # Endpoint for structured fund B shares
__fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}"
    # Endpoint for parent (master) funds
__fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}"
    # Endpoint for structured fund arbitrage
__fundarb_url = (
"http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}"
)
    # Jisilu login endpoint
__jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/"
    # Jisilu ETF endpoint
# pylint: disable=line-too-long
__etf_index_url = "https://www.jisilu.cn/data/etf/etf_list/?___jsl=LST___t={ctime:d}&rp=25&page=1"
    # Gold ETF and money-market ETF endpoints: placeholders, not implemented yet
__etf_gold_url = (
"https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}"
)
__etf_money_url = (
"https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}"
)
    # Jisilu QDII endpoint
__qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}"
    # Convertible bond endpoint
__cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}"
    # Structured fund A data
    # Format of the returned dict:
# { 150022:
# {'abrate': '5:5',
# 'calc_info': None,
# 'coupon_descr': '+3.0%',
# 'coupon_descr_s': '+3.0%',
# 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折',
# 'funda_amount': 178823,
# 'funda_amount_increase': '0',
# 'funda_amount_increase_rt': '0.00%',
# 'funda_base_est_dis_rt': '2.27%',
# 'funda_base_est_dis_rt_t1': '2.27%',
# 'funda_base_est_dis_rt_t2': '-0.34%',
# 'funda_base_est_dis_rt_tip': '',
# 'funda_base_fund_id': '163109',
# 'funda_coupon': '5.75',
# 'funda_coupon_next': '4.75',
# 'funda_current_price': '0.783',
# 'funda_discount_rt': '24.75%',
# 'funda_id': '150022',
# 'funda_increase_rt': '0.00%',
# 'funda_index_id': '399001',
# 'funda_index_increase_rt': '0.00%',
# 'funda_index_name': '深证成指',
# 'funda_left_year': '永续',
# 'funda_lower_recalc_rt': '1.82%',
# 'funda_name': '深成指A',
# 'funda_nav_dt': '2015-09-14',
# 'funda_profit_rt': '7.74%',
# 'funda_profit_rt_next': '6.424%',
# 'funda_value': '1.0405',
# 'funda_volume': '0.00',
# 'fundb_upper_recalc_rt': '244.35%',
# 'fundb_upper_recalc_rt_info': '深成指A不参与上折',
# 'last_time': '09:18:22',
# 'left_recalc_year': '0.30411',
# 'lower_recalc_profit_rt': '-',
# 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>',
# 'owned': 0,
# 'status_cd': 'N'}
# }
def __init__(self):
self.__funda = None
self.__fundm = None
self.__fundb = None
self.__fundarb = None
self.__etfindex = None
self.__qdii = None
self.__cb = None
@staticmethod
def formatfundajson(fundajson):
"""格式化集思录返回的json数据,以字典形式保存"""
result = {}
for row in fundajson["rows"]:
funda_id = row["id"]
cell = row["cell"]
result[funda_id] = cell
return result
@staticmethod
def formatfundbjson(fundbjson):
"""格式化集思录返回的json数据,以字典形式保存"""
result = {}
for row in fundbjson["rows"]:
cell = row["cell"]
fundb_id = cell["fundb_id"]
result[fundb_id] = cell
return result
@staticmethod
def formatetfindexjson(fundbjson):
"""格式化集思录返回 指数ETF 的json数据,以字典形式保存"""
result = {}
for row in fundbjson["rows"]:
cell = row["cell"]
fundb_id = cell["fund_id"]
result[fundb_id] = cell
return result
@staticmethod
def formatjisilujson(data):
result = {}
for row in data["rows"]:
cell = row["cell"]
id_ = row["id"]
result[id_] = cell
return result
@staticmethod
def percentage2float(per):
"""
        Convert a percentage string to a float
:param per:
:return:
"""
return float(per.strip("%")) / 100.
def funda(
self,
fields=None,
min_volume=0,
min_discount=0,
ignore_nodown=False,
forever=False,
):
"""以字典形式返回分级A数据
:param fields:利率范围,形如['+3.0%', '6.0%']
:param min_volume:最小交易量,单位万元
:param min_discount:最小折价率, 单位%
:param ignore_nodown:是否忽略无下折品种,默认 False
:param forever: 是否选择永续品种,默认 False
"""
if fields is None:
fields = []
        # Append the current ctime to the URL
        self.__funda_url = self.__funda_url.format(ctime=int(time.time()))
        # Request the data
        rep = requests.get(self.__funda_url)
        # Parse the returned JSON string
        fundajson = json.loads(rep.text)
        # Reshape the parsed JSON into a dict keyed by fund id
        data = self.formatfundajson(fundajson)
        # Drop entries below the requested trading volume
if min_volume:
data = {
k: data[k]
for k in data
if float(data[k]["funda_volume"]) > min_volume
}
if len(fields):
data = {
k: data[k]
for k in data
if data[k]["coupon_descr_s"] in "".join(fields)
}
if ignore_nodown:
data = {
k: data[k]
for k in data
if data[k]["fund_descr"].find("无下折") == -1
}
if forever:
data = {
k: data[k]
for k in data
if data[k]["funda_left_year"].find("永续") != -1
}
if min_discount:
data = {
k: data[k]
for k in data
if float(data[k]["funda_discount_rt"][:-1]) > min_discount
}
self.__funda = data
return self.__funda
def fundm(self):
"""以字典形式返回分级母基数据
"""
# 添加当前的ctime
self.__fundm_url = self.__fundm_url.format(ctime=int(time.time()))
# 请求数据
rep = requests.get(self.__fundm_url)
# 获取返回的json字符串
fundmjson = json.loads(rep.text)
# 格式化返回的json字符串
data = self.formatfundajson(fundmjson)
self.__fundm = data
return self.__fundm
def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False):
"""以字典形式返回分级B数据
:param fields:利率范围,形如['+3.0%', '6.0%']
:param min_volume:最小交易量,单位万元
:param min_discount:最小折价率, 单位%
:param forever: 是否选择永续品种,默认 False
"""
if fields is None:
fields = []
# 添加当前的ctime
self.__fundb_url = self.__fundb_url.format(ctime=int(time.time()))
# 请求数据
rep = requests.get(self.__fundb_url)
# 获取返回的json字符串
fundbjson = json.loads(rep.text)
# 格式化返回的json字符串
data = self.formatfundbjson(fundbjson)
# 过滤小于指定交易量的数据
if min_volume:
data = {
k: data[k]
for k in data
if float(data[k]["fundb_volume"]) > min_volume
}
if len(fields):
data = {
k: data[k]
for k in data
if data[k]["coupon_descr_s"] in "".join(fields)
}
if forever:
data = {
k: data[k]
for k in data
if data[k]["fundb_left_year"].find("永续") != -1
}
if min_discount:
data = {
k: data[k]
for k in data
if float(data[k]["fundb_discount_rt"][:-1]) > min_discount
}
self.__fundb = data
return self.__fundb
def fundarb(
self,
jsl_username,
jsl_password,
avolume=100,
bvolume=100,
ptype="price",
):
"""以字典形式返回分级A数据
:param jsl_username: 集思录用户名
:param jsl_password: 集思路登录密码
:param avolume: A成交额,单位百万
:param bvolume: B成交额,单位百万
:param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一
"""
session = requests.session()
headers = {
# pylint: disable=line-too-long
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
session.headers.update(headers)
logindata = dict(
return_url="http://www.jisilu.cn/",
user_name=jsl_username,
password=jsl_password,
net_auto_login="1",
_post_type="ajax",
)
rep = session.post(self.__jsl_login_url, data=logindata)
if rep.json()["err"] is not None:
return rep.json()
        # Append the current ctime to the URL
fundarb_url = self.__fundarb_url.format(ctime=int(time.time()))
pdata = dict(
avolume=avolume,
bvolume=bvolume,
ptype=ptype,
is_search="1",
market=["sh", "sz"],
rp="50",
)
        # Request the data
        rep = session.post(fundarb_url, data=pdata)
        # Parse the returned JSON string
        fundajson = json.loads(rep.text)
        # Reshape the parsed JSON into a dict keyed by fund id
        data = self.formatfundajson(fundajson)
self.__fundarb = data
return self.__fundarb
def etfindex(
self, index_id="", min_volume=0, max_discount=None, min_discount=None
):
"""
        Return index-ETF data as a dict
        :param index_id: only keep ETFs tracking this index
        :param min_volume: minimum trading volume
        :param min_discount: lowest premium rate, used for premium arbitrage; "-1.2%", "-1.2" and -0.012 are all accepted
        :param max_discount: highest premium rate, used for discount arbitrage; "-1.2%", "-1.2" and -0.012 are all accepted
        :return: {"fund_id":{}}
        """
        # Append the current ctime to the URL
        etf_index_url = self.__etf_index_url.format(ctime=int(time.time()))
        # Request the data
        etf_json = requests.get(etf_index_url).json()
        # Reshape the parsed JSON into a dict keyed by fund id
        data = self.formatetfindexjson(etf_json)
        # Filters
        if index_id:
            # Keep only ETFs tracking the given index code
data = {
fund_id: cell
for fund_id, cell in data.items()
if cell["index_id"] == index_id
}
if min_volume:
            # Drop entries below the requested trading volume
data = {
fund_id: cell
for fund_id, cell in data.items()
if float(cell["volume"]) >= min_volume
}
if min_discount is not None:
            # Apply the minimum premium rate
            if isinstance(min_discount, str):
                if min_discount.endswith("%"):
                    # If given as a string, convert it to a float first
min_discount = self.percentage2float(min_discount)
else:
min_discount = float(min_discount) / 100.
data = {
fund_id: cell
for fund_id, cell in data.items()
if self.percentage2float(cell["discount_rt"]) >= min_discount
}
if max_discount is not None:
            # Apply the maximum premium rate
            if isinstance(max_discount, str):
                if max_discount.endswith("%"):
                    # If given as a string, convert it to a float first
max_discount = self.percentage2float(max_discount)
else:
max_discount = float(max_discount) / 100.
data = {
fund_id: cell
for fund_id, cell in data.items()
if self.percentage2float(cell["discount_rt"]) <= max_discount
}
self.__etfindex = data
return self.__etfindex
def qdii(self, min_volume=0):
"""以字典形式返回QDII数据
:param min_volume:最小交易量,单位万元
"""
# 添加当前的ctime
self.__qdii_url = self.__qdii_url.format(ctime=int(time.time()))
# 请求数据
rep = requests.get(self.__qdii_url)
# 获取返回的json字符串
fundjson = json.loads(rep.text)
# 格式化返回的json字符串
data = self.formatjisilujson(fundjson)
data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"}
# 过滤小于指定交易量的数据
if min_volume:
data = {
k: data[k]
for k in data
if float(data[k]["volume"]) > min_volume
}
self.__qdii = data
return self.__qdii
# pylint: disable=invalid-name
def cb(self, min_volume=0):
"""以字典形式返回QDII数据
:param min_volume:最小交易量,单位万元
"""
# 添加当前的ctime
self.__cb_url = self.__cb_url.format(ctime=int(time.time()))
# 请求数据
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
'Cookie':'在你的浏览器里找到cookie'}
session = requests.Session()
rep = session.get(self.__cb_url,headers=headers)
# 获取返回的json字符串
fundjson = json.loads(rep.text)
# 格式化返回的json字符串
data = self.formatjisilujson(fundjson)
# 过滤小于指定交易量的数据
if min_volume:
data = {
k: data[k]
for k in data
if float(data[k]["volume"]) > min_volume
}
self.__cb = data
return self.__cb
if __name__ == "__main__":
Jsl().etfindex(
index_id="000016",
min_volume=0,
max_discount="-0.4",
min_discount="-1.3%",
)
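    # Added usage sketch (an illustration, not part of the original script): the
    # same client also exposes funda(); the filter values below are made-up
    # assumptions chosen only to show the keyword arguments.
    funda_data = Jsl().funda(fields=["+3.0%"], min_volume=100, forever=True)
    print(len(funda_data))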
| 29.337029 | 138 | 0.519462 |
9e4acd586c92df4250b461dafb48881e15fc3a7c
| 9,735 |
py
|
Python
|
src/sdk/pynni/nni/pbt_tuner/pbt_tuner.py
|
longcw/nni
|
300c2cc997aa58093e0a766b4a37266cd3538417
|
[
"MIT"
] | 1 |
2020-12-13T16:13:29.000Z
|
2020-12-13T16:13:29.000Z
|
src/sdk/pynni/nni/pbt_tuner/pbt_tuner.py
|
longcw/nni
|
300c2cc997aa58093e0a766b4a37266cd3538417
|
[
"MIT"
] | null | null | null |
src/sdk/pynni/nni/pbt_tuner/pbt_tuner.py
|
longcw/nni
|
300c2cc997aa58093e0a766b4a37266cd3538417
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import logging
import os
import numpy as np
import nni
from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward, split_index, json2parameter, json2space
logger = logging.getLogger('pbt_tuner_AutoML')
def exploit_and_explore(bot_trial_info, top_trial_info, factors, epoch, search_space):
"""
Replace checkpoint of bot_trial with top, and perturb hyperparameters
Parameters
----------
bot_trial_info : TrialInfo
bottom model whose parameters should be replaced
top_trial_info : TrialInfo
better model
factors : float
factors for perturbation
epoch : int
step of PBTTuner
search_space : dict
search_space to keep perturbed hyperparameters in range
"""
bot_checkpoint_dir = bot_trial_info.checkpoint_dir
top_hyper_parameters = top_trial_info.hyper_parameters
hyper_parameters = copy.deepcopy(top_hyper_parameters)
# TODO think about different type of hyperparameters for 1.perturbation 2.within search space
for key in hyper_parameters.keys():
if key == 'load_checkpoint_dir':
hyper_parameters[key] = hyper_parameters['save_checkpoint_dir']
elif key == 'save_checkpoint_dir':
hyper_parameters[key] = os.path.join(bot_checkpoint_dir, str(epoch))
elif isinstance(hyper_parameters[key], float):
perturb = np.random.choice(factors)
val = hyper_parameters[key] * perturb
lb, ub = search_space[key]["_value"][:2]
if search_space[key]["_type"] in ("uniform", "normal"):
val = np.clip(val, lb, ub).item()
hyper_parameters[key] = val
else:
continue
bot_trial_info.hyper_parameters = hyper_parameters
bot_trial_info.clean_id()
class TrialInfo:
"""
Information of each trial, refresh for each epoch
"""
def __init__(self, checkpoint_dir=None, hyper_parameters=None, parameter_id=None, score=None):
self.checkpoint_dir = checkpoint_dir
self.hyper_parameters = hyper_parameters
self.parameter_id = parameter_id
self.score = score
def clean_id(self):
self.parameter_id = None
class PBTTuner(Tuner):
def __init__(self, optimize_mode="maximize", all_checkpoint_dir=None, population_size=10, factors=(1.2, 0.8), fraction=0.2):
"""
Initialization
Parameters
----------
optimize_mode : str
maximize or minimize
all_checkpoint_dir : str
directory to store training model checkpoint
population_size : int
number of trials for each epoch
factors : tuple
factors for perturbation
fraction : float
fraction for selecting bottom and top trials
"""
self.optimize_mode = OptimizeMode(optimize_mode)
if all_checkpoint_dir is None:
all_checkpoint_dir = os.getenv('NNI_CHECKPOINT_DIRECTORY')
logger.info("Checkpoint dir is set to %s by default.", all_checkpoint_dir)
self.all_checkpoint_dir = all_checkpoint_dir
self.population_size = population_size
self.factors = factors
self.fraction = fraction
# defined in trial code
#self.perturbation_interval = perturbation_interval
self.population = None
self.pos = -1
self.param_ids = []
self.running = {}
self.finished = []
self.credit = 0
self.finished_trials = 0
self.epoch = 0
self.searchspace_json = None
self.space = None
self.send_trial_callback = None
logger.info('PBT tuner initialization')
def update_search_space(self, search_space):
"""
Get search space
Parameters
----------
search_space : dict
Search space
"""
logger.info('Update search space %s', search_space)
self.searchspace_json = search_space
self.space = json2space(self.searchspace_json)
self.random_state = np.random.RandomState()
self.population = []
is_rand = dict()
for item in self.space:
is_rand[item] = True
for i in range(self.population_size):
hyper_parameters = json2parameter(
self.searchspace_json, is_rand, self.random_state)
checkpoint_dir = os.path.join(self.all_checkpoint_dir, str(i))
hyper_parameters['load_checkpoint_dir'] = os.path.join(checkpoint_dir, str(self.epoch))
hyper_parameters['save_checkpoint_dir'] = os.path.join(checkpoint_dir, str(self.epoch))
self.population.append(TrialInfo(checkpoint_dir=checkpoint_dir, hyper_parameters=hyper_parameters))
def generate_multiple_parameters(self, parameter_id_list, **kwargs):
"""
Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Parameters
----------
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Used for send_trial_callback.
Returns
-------
list
A list of newly generated configurations
"""
result = []
self.send_trial_callback = kwargs['st_callback']
for parameter_id in parameter_id_list:
had_exception = False
try:
logger.debug("generating param for %s", parameter_id)
res = self.generate_parameters(parameter_id, **kwargs)
except nni.NoMoreTrialError:
had_exception = True
if not had_exception:
result.append(res)
return result
def generate_parameters(self, parameter_id, **kwargs):
"""
        Generate parameters. If no trial configuration is available right now, increment ``self.credit`` so the configuration can be sent later
Parameters
----------
parameter_id : int
Unique identifier for requested hyper-parameters.
This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
dict
One newly generated configuration
"""
if self.pos == self.population_size - 1:
logger.debug('Credit added by one in parameters request')
self.credit += 1
self.param_ids.append(parameter_id)
raise nni.NoMoreTrialError('No more parameters now.')
self.pos += 1
trial_info = self.population[self.pos]
trial_info.parameter_id = parameter_id
self.running[parameter_id] = trial_info
logger.info('Generate parameter : %s', trial_info.hyper_parameters)
return split_index(trial_info.hyper_parameters)
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
        Receive trial's result. If the number of finished trials equals ``self.population_size``, start the next epoch to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
"""
logger.info('Get one trial result, id = %d, value = %s', parameter_id, value)
value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Minimize:
value = -value
trial_info = self.running.pop(parameter_id, None)
trial_info.score = value
self.finished.append(trial_info)
self.finished_trials += 1
if self.finished_trials == self.population_size:
logger.info('Proceeding to next epoch')
self.epoch += 1
self.population = []
self.pos = -1
self.running = {}
            # exploit and explore
self.finished = sorted(self.finished, key=lambda x: x.score, reverse=True)
cutoff = int(np.ceil(self.fraction * len(self.finished)))
tops = self.finished[:cutoff]
bottoms = self.finished[self.finished_trials - cutoff:]
for bottom in bottoms:
top = np.random.choice(tops)
exploit_and_explore(bottom, top, self.factors, self.epoch, self.searchspace_json)
for trial in self.finished:
if trial not in bottoms:
trial.clean_id()
trial.hyper_parameters['load_checkpoint_dir'] = trial.hyper_parameters['save_checkpoint_dir']
trial.hyper_parameters['save_checkpoint_dir'] = os.path.join(trial.checkpoint_dir, str(self.epoch))
self.finished_trials = 0
for _ in range(self.population_size):
trial_info = self.finished.pop()
self.population.append(trial_info)
while self.credit > 0 and self.pos + 1 < len(self.population):
self.credit -= 1
self.pos += 1
parameter_id = self.param_ids.pop()
trial_info = self.population[self.pos]
trial_info.parameter_id = parameter_id
self.running[parameter_id] = trial_info
self.send_trial_callback(parameter_id, split_index(trial_info.hyper_parameters))
def import_data(self, data):
pass
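if __name__ == "__main__":
    # Minimal standalone sketch (illustration only: in practice NNI's dispatcher
    # drives this tuner; the checkpoint directory, search space, and reward value
    # below are assumptions, not defaults from the original module).
    tuner = PBTTuner(optimize_mode="maximize",
                     all_checkpoint_dir="/tmp/pbt_checkpoints",
                     population_size=4)
    tuner.update_search_space({"lr": {"_type": "uniform", "_value": [0.0001, 0.1]}})
    params = tuner.generate_parameters(parameter_id=0)
    tuner.receive_trial_result(parameter_id=0, parameters=params, value=0.9)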
| 37.298851 | 128 | 0.625064 |
0dba90ad307792db6001ae9837a24c6c8e63b9a0
| 1,050 |
py
|
Python
|
hahomematic/platforms/button.py
|
towo/hahomematic
|
60034cdbced3251d739ccf48b42acdcf55fecc8d
|
[
"MIT"
] | null | null | null |
hahomematic/platforms/button.py
|
towo/hahomematic
|
60034cdbced3251d739ccf48b42acdcf55fecc8d
|
[
"MIT"
] | null | null | null |
hahomematic/platforms/button.py
|
towo/hahomematic
|
60034cdbced3251d739ccf48b42acdcf55fecc8d
|
[
"MIT"
] | null | null | null |
"""
Module for entities implemented using the
button platform (https://www.home-assistant.io/integrations/button/).
"""
from __future__ import annotations
import logging
from typing import Any
from hahomematic.const import HmPlatform
import hahomematic.device as hm_device
from hahomematic.entity import GenericEntity
_LOGGER = logging.getLogger(__name__)
class HmButton(GenericEntity[None]):
"""
Implementation of a button.
This is a default platform that gets automatically generated.
"""
def __init__(
self,
device: hm_device.HmDevice,
unique_id: str,
address: str,
parameter: str,
parameter_data: dict[str, Any],
):
super().__init__(
device=device,
unique_id=unique_id,
address=address,
parameter=parameter,
parameter_data=parameter_data,
platform=HmPlatform.BUTTON,
)
async def press(self) -> None:
"""Handle the button press."""
await self.send_value(True)
| 24.418605 | 69 | 0.654286 |
4d936cc6b8379a6970fd7506cc88030261b2f006
| 2,088 |
py
|
Python
|
examples/python/multiple_instances.py
|
johny-c/ViZDoom
|
6fe0d2470872adbfa5d18c53c7704e6ff103cacc
|
[
"MIT"
] | 3 |
2020-08-25T08:10:54.000Z
|
2021-01-29T01:05:35.000Z
|
examples/python/multiple_instances.py
|
johny-c/ViZDoom
|
6fe0d2470872adbfa5d18c53c7704e6ff103cacc
|
[
"MIT"
] | null | null | null |
examples/python/multiple_instances.py
|
johny-c/ViZDoom
|
6fe0d2470872adbfa5d18c53c7704e6ff103cacc
|
[
"MIT"
] | 2 |
2020-08-28T02:00:12.000Z
|
2021-03-28T03:19:49.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
from random import choice
from vizdoom import *
# For a multiplayer game use processes (ZDoom's multiplayer sync mechanism prevents threads from working as expected).
from multiprocessing import Process
# For singleplayer games threads can also be used.
# from threading import Thread
# Run this many episodes
episodes = 10
def player1():
game = DoomGame()
# game.load_config('../../scenarios/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-host 2 -deathmatch +timelimit 1 +sv_spawnfarthest 1")
game.add_game_args("+name Player1 +colorset 0")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
print("Episode #" + str(i + 1))
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Episode finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
# Starts a new episode. All players have to call new_episode() in multiplayer mode.
game.new_episode()
game.close()
def player2():
game = DoomGame()
# game.load_config('../config/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.new_episode()
game.close()
# p1 = Thread(target = player1)
# p1.start()
if __name__ == '__main__':
p1 = Process(target=player1)
p1.start()
player2()
print("Done")
| 24.564706 | 109 | 0.64751 |
736b7229daa549b169079f63a2441d595cb3c420
| 1,836 |
py
|
Python
|
plmcpi.py
|
niu541412/plmcpi
|
2a76861afb4c2a08d8da2457dd9c53a91554db68
|
[
"MIT"
] | null | null | null |
plmcpi.py
|
niu541412/plmcpi
|
2a76861afb4c2a08d8da2457dd9c53a91554db68
|
[
"MIT"
] | null | null | null |
plmcpi.py
|
niu541412/plmcpi
|
2a76861afb4c2a08d8da2457dd9c53a91554db68
|
[
"MIT"
] | null | null | null |
def mc_pi(n): # Sampling n points to calculate pi.
#import time
import numpy as np
#a = time.time()
m = 0
pi = [None] * n
x = np.random.uniform(-1, 1, n)
y = np.random.uniform(-1, 1, n)
for i in range(0, n):
if (x[i]**2 + y[i]**2) <= 1:
m = m + 1
pi[i] = 4.0 * m / (i + 1)
#b = time.time() - a
#print("Toal time: %.1fsec\n" % (b))
return (pi[n - 1])
# use every core of CPU to parallel calculate pi, loop t times, every time sampling n points.
def pl_mc_pi(n, t):
import time
import sys
import multiprocessing
import numpy as np
a = time.time()
cores = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=cores)
cnt = 0
pi = [None] * t
for y in pool.imap_unordered(mc_pi, [n] * t):
pi[cnt] = y
m = np.mean(pi[0:cnt + 1])
cnt += 1
sys.stdout.write('done %d/%d, current pi is %f\r' % (cnt, t, m))
b = time.time() - a
print("\nToal time: %.1fsec\n" % (b))
return np.mean(pi)
def mc_pi_plot(n, tt): # want to plot an animation to show the progress?
import time
import numpy as np
import matplotlib.pyplot as plt
    # '%matplotlib osx' is IPython magic and not valid Python in a plain script;
    # when run outside IPython, rely on the default interactive backend instead.
plt.close()
a = time.time()
m = 0
pi = [None] * n
co = [None] * n
x = np.random.uniform(-1, 1, n)
y = np.random.uniform(-1, 1, n)
plt.axis('scaled')
plt.axis([-1, 1, -1, 1])
for i in range(0, n):
if (x[i]**2 + y[i]**2) <= 1:
m = m + 1
co[i] = 'r'
else:
co[i] = 'k'
pi[i] = 4.0 * m / (i + 1)
plt.scatter(x[i], y[i], s=0.75, marker='.', c=co[i], alpha=.5)
if tt:
plt.pause(tt)
b = time.time() - a
print("Toal time: %.1fsec\n" % (b))
if tt:
plt.show()
return (pi[n - 1])
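# Usage sketch added for illustration (the sample counts are assumptions, not part
# of the original module): estimate pi from 8 parallel runs of 100000 points each.
if __name__ == '__main__':
    estimate = pl_mc_pi(100000, 8)
    print("estimated pi: %f" % estimate)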
| 27 | 93 | 0.497277 |
95c2b5da575a74a92db8d586e714534c23fb24b2
| 937 |
py
|
Python
|
post-res/cache-hit/x.py
|
Xuanwo/openacid.github.io
|
6e347aff98b25f6cd02acf7acb81ede98c78f6fa
|
[
"MIT"
] | 2 |
2021-11-09T21:04:56.000Z
|
2022-01-03T14:50:22.000Z
|
post-res/cache-hit/x.py
|
Xuanwo/openacid.github.io
|
6e347aff98b25f6cd02acf7acb81ede98c78f6fa
|
[
"MIT"
] | 24 |
2019-02-01T05:40:37.000Z
|
2022-02-09T16:55:35.000Z
|
post-res/cache-hit/x.py
|
Xuanwo/openacid.github.io
|
6e347aff98b25f6cd02acf7acb81ede98c78f6fa
|
[
"MIT"
] | 1 |
2021-11-09T21:04:57.000Z
|
2021-11-09T21:04:57.000Z
|
#!/usr/bin/env python
# coding: utf-8
# 228 39 1.38 0.0353846 avg fsize: 1.52MB fcnt: 149 million
# 385 43 1.60 0.0372093 avg fsize: 1.39MB fcnt: 278 million
# 391 30 1.27 0.0423333
# total fcnt: 370 billion
# total fsize: 556 PB
# uncached:
# solve (149000000^(x+1)-1)/(1-0.0353846) = (278000000^(x+1)-1)/(1-0.0372093)
# https://www.wolframalpha.com/input/?i=solve+(149000000%5E(x%2B1)-1)%2F(1-0.0353846)+%3D+(278000000%5E(x%2B1)-1)%2F(1-0.0372093)
# a = -1.00304
# a+1 = -0.00304
a = -1.00304
n = 370*1024**3
fsize = 1.5*1024**2
a1 = a+1
N = n**a1-1
s1=149*1024**2
s2=278*1024**2
b1 = 628*1024**2 / 8
# PB
cached = 9.36 * 1024**5
cachedfcnt = cached / fsize
bcnt1 = b1/fsize
def get_b(s):
S = s**a1-1
b = 1-S/N
return b
def not_cached_fn(s):
S = s**a1-1
return N-S
S1 = s1**a1
c = bcnt1 / (n**a1 - S1) * a1
print(cachedfcnt)
nocache_acc = (n**a1 - cachedfcnt**a1) * c / a1
# per second
print(nocache_acc)
| 15.881356 | 129 | 0.620064 |
2e721a7ff08efcfe69c686823ea0511611e0113b
| 138 |
py
|
Python
|
docs/ui/examples/example57d19e3d2e848cca1e3922b7d896c87e.py
|
okajun35/Flexx_translate_ja
|
d6aaf2d981623e69cd70d20761b6509ed5af304f
|
[
"MIT"
] | 1 |
2022-03-09T03:35:56.000Z
|
2022-03-09T03:35:56.000Z
|
docs/ui/examples/example57d19e3d2e848cca1e3922b7d896c87e.py
|
okajun35/Flexx_translate_ja
|
d6aaf2d981623e69cd70d20761b6509ed5af304f
|
[
"MIT"
] | null | null | null |
docs/ui/examples/example57d19e3d2e848cca1e3922b7d896c87e.py
|
okajun35/Flexx_translate_ja
|
d6aaf2d981623e69cd70d20761b6509ed5af304f
|
[
"MIT"
] | null | null | null |
from flexx import app, ui
class Example(ui.Widget):
def init(self):
ui.Button(text='hello')
ui.Button(text='world')
| 17.25 | 31 | 0.615942 |
a773d13fc15a2025996fc46f6eaa0d7d3aee7924
| 13,341 |
py
|
Python
|
stm.py
|
yuukimiyo/python_stm_jp_unofficial
|
322506f040289639192a683a92ef48424e561121
|
[
"MIT"
] | 11 |
2020-02-07T05:26:08.000Z
|
2021-11-27T09:51:24.000Z
|
stm.py
|
yuukimiyo/python_stm_jp_unofficial
|
322506f040289639192a683a92ef48424e561121
|
[
"MIT"
] | null | null | null |
stm.py
|
yuukimiyo/python_stm_jp_unofficial
|
322506f040289639192a683a92ef48424e561121
|
[
"MIT"
] | 1 |
2020-02-10T02:44:37.000Z
|
2020-02-10T02:44:37.000Z
|
# MIT License
#
# Copyright (c) 2018-2019 Hiroki Iida / Retrieva, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This code is available under the MIT License.
import numpy as np
import scipy as sp
from scipy.special import logsumexp
import lda
import utils
from abc import ABCMeta, abstractmethod
class STM_base(metaclass=ABCMeta):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
self.X = X # DxPx matrix (Px is the num of tags)
self.K = K
self.D = len(docs)
if X is not None:
P = X.shape[1]
self.Gamma = np.zeros((P, K-1)) # parameter of topics prior
if Y is None:
Y = np.zeros(self.D, dtype=int)
self.Y = Y # Dx1 matrix
self.mu = np.zeros((self.D, K))
self.Sigma = np.diag(np.ones(K-1)) * sigma # if zero, no update. so using diag.
self.c_dv = np.zeros((self.D, V), dtype=int)
self.wd = np.zeros(self.D, dtype=int)
self.mv = np.zeros(V, dtype=int)
for m, doc in enumerate(docs):
for t in doc:
self.c_dv[m, t] += 1
self.wd[m] += 1
self.mv[t] += 1
self.mv = np.log(self.mv) - np.log(np.sum(self.mv))
self.docs = docs
self.docs_vocab = []
for doc in docs:
self.docs_vocab.append(sorted(list(set(doc))))
self.V = V
self.eta = np.zeros((self.D, K))
self.theta = np.exp(self.eta) / np.sum(np.exp(self.eta), axis=1)[:, np.newaxis]
self.A = np.unique(np.array(Y))
self.len_A = len(self.A)
self.phi = np.zeros((self.len_A, self.K, self.V))
def lda_initialize(self, alpha, beta, itr, voca, smartinit=True):
lda_init = lda.LDA(self.K, alpha, beta, self.docs, self.V, smartinit)
lda_init.learning(itr, voca)
Kalpha = self.K * alpha
self.theta = lda_init.n_m_z / (np.vectorize(len)(np.array(self.docs)) + Kalpha)[:, np.newaxis]
self.phi += lda_init.worddist()
del lda_init
def output_word_topic_dist(self, voca):
def output(phi, voca):
for k in range(self.K):
print("\n-- topic: {}".format(k))
for w in np.argsort(-phi[k])[:20]:
print("{}: {}".format(voca[w], phi[k, w]))
phi = np.average(self.phi, axis=0)
output(phi, voca)
def perplexity(self, docs=None, Y=None):
if docs is None:
docs = self.docs
if Y is None:
Y = self.Y
log_per = 0
N = 0
for m, (doc, a) in enumerate(zip(docs, Y)):
for w in doc:
log_per -= np.log(np.dot(self.phi[a, :, w], self.theta[m]))
N += len(doc)
return np.exp(log_per / N)
def learning(self, iteration, voca):
pre_perp = self.perplexity()
print("initial perplexity=%f" % pre_perp)
for i in range(iteration):
self.inference(i)
perp = self.perplexity()
print("-%d p=%f" % (i + 1, perp))
if pre_perp:
if pre_perp < perp:
self.output_word_topic_dist(voca)
pre_perp = None
else:
pre_perp = perp
self.output_word_topic_dist(voca)
def inference(self, iter_num):
"""learning once iteration"""
# E-step
# update q_eta and q_z
phi_updater, q_v, variance_topics = self.update_Estep()
# M-step
self.update_mu_and_Gamma()
# update Sigma
if iter_num > 10:
self.update_Sigma(q_v, variance_topics)
# update phi
self.update_phi(phi_updater)
def update_Estep(self):
E_count = np.zeros((len(self.A), self.K, self.V))
q_v = np.zeros((self.K - 1, self.K - 1))
variance_topics = np.zeros((self.K - 1, self.K - 1))
inv_Sigma = np.linalg.inv(self.Sigma)
for m, (_, i, a) in enumerate(zip(self.docs, self.docs_vocab, self.Y)):
            # fancy indexing below returns a copy, not a view
phi_a = self.phi[a, :, i].T
c_dv_d = self.c_dv[m, i]
self.eta[m], self.theta[m], q_z_d \
= utils.update_eta(m, self.K, self.eta[m],
phi_a, self.Sigma,
self.mu, c_dv_d, self.wd)
# prepare update Sigma(calc q_v) and phi(calc phi_tmp)
E_count[a, :, i] += (c_dv_d * q_z_d).T
hessian = utils.update_Hessian(self.K, q_z_d, c_dv_d, self.wd[m], self.theta[m], inv_Sigma)
q_v += np.linalg.inv(hessian)
diff_var_and_mean = self.calc_diff_var_and_mean(m)
variance_topics += np.outer(diff_var_and_mean, diff_var_and_mean)
return (E_count, q_v, variance_topics)
@abstractmethod
def update_mu_and_Gamma(self):
pass
def update_Sigma(self, q_v, variance_topics):
self.Sigma = (q_v + variance_topics) / len(self.docs)
@abstractmethod
def update_phi(self, E_count):
pass
class STM_jeff_base(STM_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
self.aspectmod = self.len_A > 1.0
self.interact = interact
self.coef_row = self.K + self.len_A * self.aspectmod + self.len_A * self.K * self.interact
self.kappa_params = np.zeros((self.coef_row, V))
self.kappa_sum = np.full((self.len_A, self.K, self.V), self.mv)
def jeffereysKappa(self, E_count):
def kappa_obj(kappa_param, kappa_other, c_k, bigC_k, gaussprec):
p1 = -1 * np.sum(c_k * kappa_param)
demon_kappas = kappa_other * np.exp(kappa_param)
lseout = np.log(np.sum(demon_kappas, axis=1))
p2 = np.sum(bigC_k * lseout)
p3 = 0.5 * np.sum(kappa_param**2 * gaussprec)
return p1 + p2 + p3
def kappa_grad(kappa_param, kappa_other, c_k, bigC_k, gaussprec):
denom_kappas = kappa_other * np.exp(kappa_param)
betaout = denom_kappas / np.sum(denom_kappas, axis=1)[:, np.newaxis]
p2 = np.sum(bigC_k[:, np.newaxis] * betaout, axis=0) # sum up the non focus axis
p3 = kappa_param * gaussprec
return -c_k + p2 + p3
if(not(self.aspectmod)):
KbyV = E_count[0]
KbyA = np.sum(KbyV, axis=1)
else:
KbyV = np.sum(E_count, axis=0)
KbyA = np.sum(E_count, axis=2).T
max_it = 3
tol = .001
kappamax_it = 1000
taumax_it = 1000
tautol = 1e-5
        # define the upper-bound indices for each kind of kappa update
i_update_kv = self.K
if (self.aspectmod and self.interact):
i_update_ka = self.K + self.len_A
i_update_kav = self.coef_row
else:
i_update_ka = self.coef_row
i_update_kav = 0
opt_tau = np.vectorize(lambda x: 1/x**2 if x**2 > 1e-5 else 1e5)
for it in range(max_it):
compare = np.abs(self.kappa_params) < .001
for i in range(self.coef_row): # i:0~K-1=>update kv, K~K+A-1=>update ka, K+A~K+A+K*A-1=>update kav
kappa_init = self.kappa_params[i]
if i < i_update_kv:
k = i
c_k = KbyV[k, :]
bigC_k = KbyA[k, :]
self.kappa_sum[:, k, :] -= kappa_init
kappa_other = np.exp(self.kappa_sum[:, k, :])
elif i < i_update_ka:
a = i - self.K
c_k = np.sum(E_count[a], axis=0)
bigC_k = KbyA[:, a]
self.kappa_sum[a, :, :] -= kappa_init
kappa_other = np.exp(self.kappa_sum[a, :, :])
elif i < i_update_kav:
a, k = divmod(i-self.K-self.len_A, self.K)
c_k = E_count[a, k, :]
bigC_k = KbyA[k, a][np.newaxis]
self.kappa_sum[a, k, :] -= kappa_init
kappa_other = np.exp(self.kappa_sum[a, k, :])[np.newaxis, :]
converged = False
for j in range(taumax_it):
if(not(np.any(kappa_init))):
gaussprec = 1
else:
gaussprec = opt_tau(kappa_init)
result = sp.optimize.minimize(fun=kappa_obj, x0=kappa_init,
args=(kappa_other, c_k, bigC_k, gaussprec),
jac=kappa_grad, method="L-BFGS-B", options={'maxiter': kappamax_it})
kappa_init = result.x
converged = np.mean(np.abs(self.kappa_params[i] - kappa_init))
self.kappa_params[i] = kappa_init
if converged <= tautol:
break
if i < i_update_kv:
self.kappa_sum[:, k, :] += self.kappa_params[i]
elif i < i_update_ka:
self.kappa_sum[a, :, :] += self.kappa_params[i]
elif i < i_update_kav:
self.kappa_sum[a, k, :] += self.kappa_params[i]
current = np.abs(self.kappa_params) < .001
sparseagree = np.average(compare == current)
self.phi = np.exp(self.kappa_sum - logsumexp(self.kappa_sum, axis=2)[:, :, np.newaxis])
if sparseagree > tol:
break
def update_phi(self, E_count):
self.jeffereysKappa(E_count)
@abstractmethod
def calc_diff_var_and_mean(self, m):
pass
class STM_jeff_reg(STM_jeff_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
def calc_diff_var_and_mean(self, m):
return (self.eta[m, 0:self.K-1] - np.dot(self.X, self.Gamma)[m])
def update_mu_and_Gamma(self):
tmp_Gamma = utils.RVM_regression(self.eta, self.X, self.K)
self.Gamma = tmp_Gamma[:self.D, :self.K-1]
self.mu = np.dot(self.X, self.Gamma)
class STM_jeff_noX(STM_jeff_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
def calc_diff_var_and_mean(self, m):
return (self.eta[m, 0:self.K-1] - self.mu[m, 0:self.K-1])
def update_mu_and_Gamma(self):
self.mu = np.tile(np.sum(self.eta, axis=0) / self.D, (self.D, 1))
class STM_noY_base(STM_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
def calc_diff_var_and_mean(self, m):
pass
def update_phi(self, q_z):
        # ref: Variational EM Algorithms for Correlated Topic Models / Mohammad Emtiaz Khan et al.
for k in range(self.K):
self.phi[0, k, :] = q_z[0, k, :]
self.phi[0, :, :] = q_z[0] / np.sum(q_z[0, :, :], axis=1)[:, np.newaxis]
class STM_noY_reg(STM_noY_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
def calc_diff_var_and_mean(self, m):
return (self.eta[m, 0:self.K-1] - np.dot(self.X, self.Gamma)[m])
def update_mu_and_Gamma(self):
tmp_Gamma = utils.RVM_regression(self.eta, self.X, self.K)
self.Gamma = tmp_Gamma[:self.D, :self.K-1]
self.mu = np.dot(self.X, self.Gamma)
class STM_noY_noX(STM_noY_base):
def __init__(self, K, X, Y, docs, V, sigma, interact=True):
super().__init__(K, X, Y, docs, V, sigma, interact)
def calc_diff_var_and_mean(self, m):
return (self.eta[m, 0:self.K-1] - self.mu[m, 0:self.K-1])
def update_mu_and_Gamma(self):
self.mu = np.tile(np.sum(self.eta, axis=0) / self.D, (self.D, 1))
def STM_factory_method(K, X, Y, docs, V, sigma, interact=True):
if Y is None:
if X is None:
return STM_noY_noX(K, X, Y, docs, V, sigma, interact)
else:
return STM_noY_reg(K, X, Y, docs, V, sigma, interact)
else:
if X is None:
return STM_jeff_noX(K, X, Y, docs, V, sigma, interact)
else:
return STM_jeff_reg(K, X, Y, docs, V, sigma, interact)
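# Usage sketch (illustrative assumptions only: `docs` is a list of word-id lists,
# `voca` the matching vocabulary, and the hyperparameter values are made up):
#
#   model = STM_factory_method(K=10, X=None, Y=None, docs=docs, V=len(voca), sigma=0.1)
#   model.lda_initialize(alpha=0.1, beta=0.01, itr=20, voca=voca)
#   model.learning(iteration=50, voca=voca)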
| 37.369748 | 118 | 0.558954 |
58ea09050c54396f817ad5f935d15749a6745089
| 262 |
py
|
Python
|
Sem-07-T2-02.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
Sem-07-T2-02.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
Sem-07-T2-02.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
def main():
n = int(input())
if n >= 2:
t1 = 0
t2 = 1
print(f'{t1}, {t2}', end='')
cont = 3
while cont <= n:
t3 = t1 + t2
print(f', {t3}', end='')
t1 = t2
t2 = t3
cont += 1
if __name__ == '__main__':
main()
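    # Worked example: entering 5 prints the first five Fibonacci terms:
    # 0, 1, 1, 2, 3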
| 17.466667 | 32 | 0.40458 |
2e18583f7e73efa5d6bffdb3727af2e51ff6bd04
| 14,330 |
py
|
Python
|
google/cloud/retail_v2/services/completion_service/transports/grpc.py
|
tetiana-karasova/python-retail
|
b834c1fb16212e59241267e18d38b490e962af7f
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/retail_v2/services/completion_service/transports/grpc.py
|
tetiana-karasova/python-retail
|
b834c1fb16212e59241267e18d38b490e962af7f
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/retail_v2/services/completion_service/transports/grpc.py
|
tetiana-karasova/python-retail
|
b834c1fb16212e59241267e18d38b490e962af7f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.retail_v2.types import completion_service
from google.cloud.retail_v2.types import import_config
from google.longrunning import operations_pb2 # type: ignore
from .base import CompletionServiceTransport, DEFAULT_CLIENT_INFO
class CompletionServiceGrpcTransport(CompletionServiceTransport):
"""gRPC backend transport for CompletionService.
Auto-completion service for retail.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "retail.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "retail.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def complete_query(
self,
) -> Callable[
[completion_service.CompleteQueryRequest],
completion_service.CompleteQueryResponse,
]:
r"""Return a callable for the complete query method over gRPC.
Completes the specified prefix with keyword
suggestions.
This feature is only available for users who have Retail
Search enabled. Please enable Retail Search on Cloud
Console before using this feature.
Returns:
Callable[[~.CompleteQueryRequest],
~.CompleteQueryResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "complete_query" not in self._stubs:
self._stubs["complete_query"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.CompletionService/CompleteQuery",
request_serializer=completion_service.CompleteQueryRequest.serialize,
response_deserializer=completion_service.CompleteQueryResponse.deserialize,
)
return self._stubs["complete_query"]
@property
def import_completion_data(
self,
) -> Callable[
[import_config.ImportCompletionDataRequest], operations_pb2.Operation
]:
r"""Return a callable for the import completion data method over gRPC.
Bulk import of processed completion dataset.
Request processing is asynchronous. Partial updating is
not supported.
The operation is successfully finished only after the
imported suggestions are indexed successfully and ready
for serving. The process takes hours.
This feature is only available for users who have Retail
Search enabled. Please enable Retail Search on Cloud
Console before using this feature.
Returns:
Callable[[~.ImportCompletionDataRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_completion_data" not in self._stubs:
self._stubs["import_completion_data"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.CompletionService/ImportCompletionData",
request_serializer=import_config.ImportCompletionDataRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_completion_data"]
def close(self):
self.grpc_channel.close()
__all__ = ("CompletionServiceGrpcTransport",)
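# Illustrative sketch (an assumption, not part of the generated module): callers
# normally go through the generated client rather than the transport, but the
# callable exposed above can be used directly; the catalog path is a placeholder.
#
#   transport = CompletionServiceGrpcTransport()
#   request = completion_service.CompleteQueryRequest(
#       catalog="projects/my-project/locations/global/catalogs/default_catalog",
#       query="shoe",
#   )
#   response = transport.complete_query(request)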
| 44.092308 | 91 | 0.644731 |
3f95af1bf0fb076eaacd458403f420ad7aa72e5f
| 3,247 |
py
|
Python
|
zmk/testUseCase/TestAutoML.py
|
frenebo/ZMOD
|
58159fcbf61200c1ec2d6b92fca0cd9d4e83a208
|
[
"Apache-2.0"
] | 2 |
2020-11-29T00:16:37.000Z
|
2021-02-22T18:12:15.000Z
|
zmk/testUseCase/TestAutoML.py
|
frenebo/ZMOD
|
58159fcbf61200c1ec2d6b92fca0cd9d4e83a208
|
[
"Apache-2.0"
] | 20 |
2020-09-26T01:04:36.000Z
|
2022-03-02T08:38:18.000Z
|
zmk/testUseCase/TestAutoML.py
|
frenebo/ZMOD
|
58159fcbf61200c1ec2d6b92fca0cd9d4e83a208
|
[
"Apache-2.0"
] | 1 |
2020-02-29T12:18:28.000Z
|
2020-02-29T12:18:28.000Z
|
import requests
import json
import os
def autoMlSendData(filePath):
url='http://localhost:8000/api/v1/trainAutoMLModel'
param={'filePath':filePath}
res=requests.get(url,param)
kk=res.text
tempa=json.loads(kk)
return tempa
def autoMlTrain(filePath, idForData):
url2='http://localhost:8000/api/v1/trainAutoMLModel'
    # Aliases so the JSON-style payload below (with lowercase true/false) parses as Python.
    true = True
    false = False
dataPreprocessingsteps={"data":[{"position":1,"variable":"mpg","dtype":"float64","missing_val":0,"changedataType":"Continuous","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":2,"variable":"cylinders","dtype":"int64","missing_val":0,"changedataType":"Continuous","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":3,"variable":"displacement","dtype":"float64","missing_val":0,"changedataType":"Continuous","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":4,"variable":"horsepower","dtype":"float64","missing_val":6,"changedataType":"Continuous","imputation_method":"Mean","data_transformation_step":"None","use_for_model":true},
{"position":5,"variable":"weight","dtype":"int64","missing_val":0,"changedataType":"Continuous","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":6,"variable":"acceleration","dtype":"float64","missing_val":0,"changedataType":"Continuous","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":7,"variable":"model year","dtype":"int64","missing_val":0,"changedataType":"Categorical","imputation_method":"None","data_transformation_step":"None","use_for_model":true},
{"position":8,"variable":"origin","dtype":"int64","missing_val":0,"changedataType":"Categorical","imputation_method":"None","data_transformation_step":"One Hot Encoding","use_for_model":true},
{"position":9,"variable":"car name","dtype":"object","missing_val":0,"changedataType":"Categorical","imputation_method":"None","data_transformation_step":"None","use_for_model":false}
],"problem_type":"Regression","target_variable":"mpg","idforData":idForData,'newPMMLFileName':'xyz.pmml','filePath':filePath,"parameters": []}
headers = {'content-type': 'application/json'}
res2=requests.post(url2,headers=headers,json=dataPreprocessingsteps)
def checkStatusOfTraining(idForData):
url3='http://localhost:8000/api/v1/runningTasks/'+idForData
res3=requests.get(url3)
tempro=json.loads(res3.text)
return tempro
print('\n\n:::::::::::::::: Test for AutoML :::::::::::::::: \n\n')
path = os.getcwd()+'/'+'testUseCase/supportdata/'
path = path+'/'+'mpg_data_example.csv'
resp = autoMlSendData(path)
idforData = resp['idforData']
print('>>>> Data sent successfully')
autoMlTrain(path, idforData)
print('>>>> Auto training is in progress')
respp = checkStatusOfTraining(idforData)
import time, sys
while(respp['status'] != 'Complete'):
time.sleep(15)
respp = checkStatusOfTraining(idforData)
if respp['status'] == 'Training Failed':
print('>>>> Training Failed')
print(respp)
sys.exit(1)
else:
print('>>>> ',respp['status'],'')
time.sleep(2)
| 50.734375 | 214 | 0.708654 |
91e7a2de71ef7139b1503f193992d16501bb48b5
| 1,530 |
py
|
Python
|
server.py
|
hugo19941994/movie-pepper-back
|
b5e05890c12b3a89cc758143c3b18599b1bcb4a1
|
[
"MIT"
] | null | null | null |
server.py
|
hugo19941994/movie-pepper-back
|
b5e05890c12b3a89cc758143c3b18599b1bcb4a1
|
[
"MIT"
] | 227 |
2017-10-07T23:21:50.000Z
|
2022-03-25T17:01:25.000Z
|
server.py
|
hugo19941994/movie-pepper-back
|
b5e05890c12b3a89cc758143c3b18599b1bcb4a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
server.py
Flask server for recommender.py API
"""
from flask import Flask # type: ignore
from flask_cors import CORS # type: ignore
from flask import Response
from recommender import Recommender
from decorator import decorator
from doc2vec import Doc2VecModel
import json
import brotli
rec = Recommender()
d2v = Doc2VecModel()
app = Flask(__name__)
CORS(app)
@decorator
def brotlify(f, *args, **kwargs):
"""Brotli Flask Response Decorator"""
data = f(*args, **kwargs)
if isinstance(data, Response):
content = data.data
else:
content = data
deflated_data = brotli.compress(content)
if isinstance(data, Response):
data.data = deflated_data
data.headers['Content-Encoding'] = 'br'
data.headers['Content-Length'] = str(len(data.data))
return data
return deflated_data
@app.route("/movies")
@brotlify
def movies() -> Response:
return Response(
json.dumps(rec.get_movies()),
mimetype='application/json'
)
@app.route("/recommend/<string:title>")
@brotlify
def recommend(title: str) -> Response:
return Response(
json.dumps(rec.recommend(title)),
mimetype='application/json'
)
@app.route("/recommend-d2v/<string:title>")
@brotlify
def recommend_d2v(title: str) -> Response:
return Response(
json.dumps(d2v.recommendation(title, 10)),
mimetype='application/json'
)
if __name__ == "__main__":
app.run(host="0.0.0.0",
port=5000)
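# Example requests against the routes defined above (host/port follow app.run's
# arguments here; the movie title is an illustrative placeholder):
#   curl http://localhost:5000/movies
#   curl http://localhost:5000/recommend/Alien
#   curl http://localhost:5000/recommend-d2v/Alien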
| 19.87013 | 60 | 0.660784 |
7a6944aef04421b6b88cda11d2fc59c14bf14ac8
| 8,585 |
py
|
Python
|
wargame/models.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 2 |
2020-10-06T17:07:32.000Z
|
2020-10-15T09:25:42.000Z
|
wargame/models.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 67 |
2018-06-22T09:12:44.000Z
|
2022-03-11T23:34:39.000Z
|
wargame/models.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 2 |
2020-10-05T21:13:48.000Z
|
2020-10-10T13:46:20.000Z
|
import os
from django.contrib.auth.models import AbstractUser, Permission
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import F, Sum, Max, Q
from django.db.models.expressions import ExpressionWrapper
from django.db.models.fields import IntegerField
from django.db.models.functions import Coalesce, Cast
from django.dispatch import receiver
from markdownx.models import MarkdownxField
from taggit.managers import TaggableManager
import wargame_web.settings.base as settings
from wargame_admin.models import Config
def custom_username_validator(username):
message = "Enter a valid username. This value may contain only letters, numbers, and @/+/-/_ characters."
if "~" in username or "/" in username or "." in username:
raise ValidationError(message)
return UnicodeUsernameValidator(message=message).__call__(username)
class User(AbstractUser):
hidden = models.BooleanField(default=False)
username = models.CharField(
"username",
max_length=150,
unique=True,
help_text="Required. 150 characters or fewer. Letters, digits and @/+/-/_ only.",
validators=[custom_username_validator],
error_messages={"unique": "A user with that username already exists."},
)
def admin_str(self):
if self.is_superuser:
return "Admin"
return "Not admin"
def hidden_str(self):
if self.hidden:
return "Hidden from scoreboard"
return "Visible on scoreboard"
def active_str(self):
if self.is_active:
return "Active"
return "Not active"
def get_score(self):
if Config.objects.is_qpa():
flag_field = F("challenge__flag_qpa")
else:
flag_field = F("challenge__flag_hacktivity")
challenge_points = F("challenge__points")
hint_used = Cast("hint_used", IntegerField())
user_points = ExpressionWrapper(challenge_points - (hint_used * challenge_points * 0.5), output_field=IntegerField())
return (
UserChallenge.objects.filter(user=self, submission__value__iexact=flag_field, challenge__hidden=False)
.annotate(points_with_hint=user_points)
.aggregate(total_points=Coalesce(Sum("points_with_hint"), 0))
.get("total_points")
)
@staticmethod
def get_top_40_by_score():
if Config.objects.is_qpa():
flag_field = F("userchallenge__challenge__flag_qpa")
else:
flag_field = F("userchallenge__challenge__flag_hacktivity")
challenge_points = F("userchallenge__challenge__points")
hint_used = Cast("userchallenge__hint_used", IntegerField())
user_points = ExpressionWrapper(challenge_points - (hint_used * challenge_points * 0.5), output_field=IntegerField())
return (
User.objects.filter(
userchallenge__submission__value__iexact=flag_field, userchallenge__challenge__hidden=False, hidden=False
)
.values("username")
.annotate(total_points=Coalesce(Sum(user_points), 0))
.order_by("-total_points")[:40]
)
def get_visible_level(self):
if Config.objects.stage_tasks() == 0:
return Challenge.objects.aggregate(Max("level"))["level__max"]
if Config.objects.is_qpa():
flag_field = F("challenge__flag_qpa")
else:
flag_field = F("challenge__flag_hacktivity")
user_max_level = self.userchallenge_set.all().aggregate(max_level=Coalesce(Max("challenge__level"), 1))["max_level"]
solved_challenges_at_max_level = UserChallenge.objects.filter(
challenge__level=user_max_level, challenge__hidden=False, user=self, submission__value__iexact=flag_field
).count()
if solved_challenges_at_max_level >= Config.objects.stage_tasks():
user_max_level += 1
return user_max_level
def get_visible_challenges(self):
level = self.get_visible_level()
if Config.objects.is_qpa():
flag_field = F("flag_qpa")
else:
flag_field = F("flag_hacktivity")
return (
Challenge.objects.filter(level__lte=level, hidden=False)
.annotate(
solved=Sum(
Cast(Q(userchallenge__submission__value__iexact=flag_field, userchallenge__user=self), IntegerField())
)
)
.order_by("level", "title")
)
def is_challenge_visible(self, challenge):
return challenge.level <= self.get_visible_level() and not challenge.hidden
class Challenge(models.Model):
title = models.CharField(max_length=256)
creation_dt = models.DateTimeField(auto_now_add=True)
description = MarkdownxField()
short_description = models.CharField(max_length=512, default="")
level = models.IntegerField()
flag_qpa = models.CharField(max_length=256, null=True, verbose_name="Flag (QPA)")
flag_hacktivity = models.CharField(max_length=256, null=True, verbose_name="Flag (Hacktivity)")
points = models.IntegerField()
hint = models.CharField(max_length=8192, null=True)
solution = models.CharField(max_length=8192, null=True)
setup = models.CharField(max_length=8192, null=True, blank=True)
import_name = models.CharField(max_length=64, verbose_name="Internal name", unique=True)
tags = TaggableManager()
hidden = models.BooleanField(default=False)
def __str__(self):
return self.title
def get_flag(self):
if Config.objects.is_qpa():
return self.flag_qpa
else:
return self.flag_hacktivity
def get_files(self):
return self.files.filter(config_name=Config.objects.config_name().value)
def tag_list(self):
return ", ".join(self.tags.names())
def users_attempted(self):
return self.userchallenge_set.count()
def users_solved(self):
return self.userchallenge_set.filter(submission__value__iexact=self.get_flag()).count()
def hidden_str(self):
if self.hidden:
return "Hidden"
return "Visible"
class File(models.Model):
CONFIG_CHOICES = (("qpa", "qpa"), ("hacktivity", "hacktivity"))
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE, related_name="files")
file = models.FileField(upload_to="challenge-files/")
filename = models.CharField(max_length=256)
display_name = models.CharField(max_length=256)
private = models.BooleanField(default=False)
config_name = models.CharField(max_length=20, null=False, blank=False, choices=CONFIG_CHOICES)
# Deletes file from filesystem when File object is deleted.
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
if instance.file:
if os.path.isfile(instance.file.path):
os.remove(instance.file.path)
class UserChallenge(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
hint_used = models.BooleanField(default=False)
class Meta:
unique_together = (("user", "challenge"),)
@staticmethod
def get_or_create(user, challenge):
try:
ret = UserChallenge.objects.get(user=user, challenge=challenge)
except UserChallenge.DoesNotExist:
ret = UserChallenge()
ret.user = user
ret.challenge = challenge
ret.save()
return ret
def solved(self):
return self.submission_set.filter(value__iexact=self.challenge.get_flag()).exists()
class Submission(models.Model):
creation_dt = models.DateTimeField(auto_now_add=True)
value = models.CharField(max_length=256)
user_challenge = models.ForeignKey(UserChallenge, on_delete=models.CASCADE)
times = models.IntegerField(default=0)
class Meta:
unique_together = ("user_challenge", "value")
@staticmethod
def get_or_create(userchallenge, value):
try:
ret = userchallenge.submission_set.get(value=value)
except Submission.DoesNotExist:
ret = Submission()
ret.user_challenge = userchallenge
ret.value = value
ret.save()
return ret
class StaffMember(models.Model):
name = models.CharField(max_length=256)
def __str__(self):
return self.name
| 35.475207 | 125 | 0.678858 |
5ffbd910be95e4eab3fde928414cb6740413c52a
| 2,711 |
py
|
Python
|
aguas_altas/preprocessing/audio_to_spectro_image.py
|
PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii
|
2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e
|
[
"MIT"
] | null | null | null |
aguas_altas/preprocessing/audio_to_spectro_image.py
|
PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii
|
2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e
|
[
"MIT"
] | null | null | null |
aguas_altas/preprocessing/audio_to_spectro_image.py
|
PEM-Humboldt/caracterizacion_paisajes_sonoros_ppii
|
2b99a69faeb5cc094e582a2b6929ef18bd4a3c4e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Load data from pickle files and save images of spectrogram
The pipeline includes:
- A low Butterworth pass filter
- Spectrogram computation
- A gaussian smoothing of the spectrogram
- Normalization of the spectrogram according to vmin, vmax values
@author: jsulloa
"""
import numpy as np
import pickle
from maad import sound, util
from personal_utilities import listdir_pattern, crossfade_list
from skimage import io
from librosa import output
from personal_utilities import butter_filter
from skimage.filters import gaussian
#%% settings
fs = 192000
opt_spec = {'wl': 4096, 'ovlp': 0.5, 'fcrop': [10,60000], 'db_range': 250}
fpath = '/Volumes/PAPAYA/ANH/pkl_data/'
path_save = '/Volumes/PAPAYA/ANH/pkl_data/'
fmt = '.png'
tlims = [00,24] # time limits, in hours
write_wav = True
#%%
im_dict= dict()
# load elements
flist_dir = listdir_pattern(fpath, ends_with='pkl')
for fname_open in flist_dir:
print('Processing file:', fname_open)
pickle_in = open(fpath+fname_open,'rb')
s_dict = pickle.load(pickle_in)
flist = s_dict['flist']
# filter flist
idx_time = (flist.date_fmt.dt.hour >= tlims[0]) & (flist.date_fmt.dt.hour <= tlims[1])
flist = flist.loc[idx_time,:]
flist_days = flist.groupby(flist.date_fmt.dt.dayofyear)
# iterate by day
for day, flist_day in flist_days:
date = flist_day.date_fmt.iloc[0].strftime('%y-%m-%d')
print('Processing date: ', date)
# concat audio into array
s_sum = list()
for index, row in flist_day.iterrows():
s = s_dict[row.date]['s']
s_sum.append(s)
# crossfade and high pass filtering
#s_sum = crossfade_list(s_sum, fs)
#s_sum = butter_filter(s_sum,cutoff=200, fs=fs, order=2, ftype='high')
s_sum = np.concatenate(s_sum, axis=0)
# compute spectrogram
im, dt, df, ext = sound.spectrogram(s_sum, fs, nperseg=opt_spec['wl'],
overlap=opt_spec['ovlp'], flims=opt_spec['fcrop'])
im = util.power2dB(im, 90) + 90
# Apply gaussian smoothing
im = gaussian(im, sigma=0.5, mode='reflect')
# Normalize spectrogram according to sensor model
vmin, vmax = 0, 66 # Audiomoth
im[im<vmin] = vmin
im[im>vmax] = vmax
im = (im - im.min())/(im.max() - im.min())
# save to file
im = np.flip(im, axis=0)
key = fname_open[0:-4]+'_'+date
io.imsave(path_save+key+fmt, im)
if write_wav:
output.write_wav(path_save+key+'.wav', s_sum, fs)
else:
pass
| 32.662651 | 94 | 0.617484 |
5a910533c9381908cc0829a39aae7a630f1a9385
| 8,818 |
py
|
Python
|
datasets/augmentations.py
|
ariel415el/CTSegmentation-Pytorch
|
9584c20fd009b93211d6d89afa0df7aaecd31468
|
[
"Apache-2.0"
] | null | null | null |
datasets/augmentations.py
|
ariel415el/CTSegmentation-Pytorch
|
9584c20fd009b93211d6d89afa0df7aaecd31468
|
[
"Apache-2.0"
] | null | null | null |
datasets/augmentations.py
|
ariel415el/CTSegmentation-Pytorch
|
9584c20fd009b93211d6d89afa0df7aaecd31468
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
from torchvision.transforms import transforms, InterpolationMode
from scipy.ndimage import map_coordinates
import torchvision.transforms.functional as F
class Resize:
def __init__(self, rescale=128):
self.rescale = rescale
self.resize_ct = transforms.Resize((self.rescale, self.rescale))
self.resize_gt = transforms.Resize((self.rescale, self.rescale), interpolation=InterpolationMode.NEAREST)
def __call__(self, sample):
image, segmap = sample
image = self.resize_ct(image)
segmap = self.resize_gt(segmap)
# image = transforms.Resize((self.rescale, self.rescale))(torch.from_numpy(image).unsqueeze(0))[0].numpy()
# segmap = transforms.Resize((self.rescale, self.rescale), interpolation=InterpolationMode.NEAREST)(segmap[None, :])[0]
return image, segmap
class HistogramEqualization:
def __init__(self, nbins):
self.nbins = nbins
def __call__(self, sample):
image, segmap = sample
        image_histogram, bins = np.histogram(image.flatten(), bins=self.nbins, density=True)
        cdf = image_histogram.cumsum()  # cumulative distribution function
        cdf = cdf / cdf[-1]  # normalize so the CDF ends at 1
# use linear interpolation of cdf to find new pixel values
image = np.interp(image.flatten(), bins[:-1], cdf).reshape(image.shape)
return image, segmap
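# A minimal usage sketch for the CDF-based equalization above (added for illustration,
# not part of the original module; the array shapes below are assumptions). The transform
# takes an (image, segmap) pair and only remaps the image intensities, leaving the
# segmentation untouched.
#
#   import numpy as np
#   eq = HistogramEqualization(nbins=256)
#   ct_slice = np.random.randint(-1024, 1024, size=(1, 128, 128)).astype(np.float32)
#   segmap = np.zeros((1, 128, 128), dtype=np.uint8)
#   eq_image, segmap = eq((ct_slice, segmap))  # eq_image values now lie in [0, 1]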
class RandomScale:
def __init__(self, p=0.5, scale_range=(128,256)):
self.p = p
self.scale_range = scale_range
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
h, w = image.shape[-2:]
new_w = np.random.randint(*self.scale_range)
new_h = h * new_w // w
image = transforms.Resize((new_h, new_w))(torch.from_numpy(image).unsqueeze(0))[0].numpy()
segmap = transforms.Resize((new_h, new_w), interpolation=InterpolationMode.NEAREST)(torch.from_numpy(segmap).unsqueeze(0))[0].numpy()
return image, segmap
class RandomCrop:
"""Crop randomly the image in a sample.
Args:
        scale_range: tuple: range of possible crop factor for each dimension.
"""
def __init__(self, p=0.5, scale_range=(0.8, 1)):
self.p = p
self.scale_range = scale_range
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
h, w = image.shape[-2:]
new_h = int(np.random.uniform(*self.scale_range) * h)
new_w = int(np.random.uniform(*self.scale_range) * w)
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[..., top: top + new_h, left: left + new_w]
segmap = segmap[..., top: top + new_h, left: left + new_w]
return image, segmap
# class ElasticDeformation3D_package:
# def __init__(self, sigma=25, n_points=3, p=0.5):
# self.sigma = sigma
# self.n_points = n_points
# self.p = p
#
# def __call__(self, sample):
# if torch.rand(1) < self.p:
# import elasticdeform
# # return elasticdeform.deform_random_grid([X, Y], sigma=self.sigma, order=[1, 0], mode='nearest', axis=(1, 2)) # only spatialy (same for all slices)
# sample = elasticdeform.deform_random_grid(list(sample), sigma=self.sigma, order=[1, 0], mode='nearest')
# return image, segmap
class ElasticDeformation3D:
def __init__(self, sigma=25, n_points=3, p=0.5, order=1):
"""
taken from https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py
Elastic deformation of 2D or 3D images on a gridwise basis
X: image
Y: segmentation of the image
sigma = standard deviation of the normal distribution
points = number of points of the each side of the square grid
Elastic deformation approach found in
Ronneberger, Fischer, and Brox, "U-Net: Convolutional Networks for Biomedical
Image Segmentation" also used in Çiçek et al., "3D U-Net: Learning Dense Volumetric
Segmentation from Sparse Annotation"
based on a coarsed displacement grid interpolated to generate displacement for every pixel
deemed to represent more realistic, biologically explainable deformation of the image
for each dimension, a value for the displacement is generated on each point of the grid
then interpolated to give an array of displacement values, which is then added to the corresponding array of coordinates
the resulting (list of) array of coordinates is mapped to the original image to give the final image
"""
self.sigma = sigma
self.n_points = n_points
self.order=order
self.p = p
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
S, H, W = image.shape
# creates the grid of coordinates of the voxels of the image (an ndim array per dimension)
voxel_coordinates = np.meshgrid(np.arange(S),
np.arange(H),
np.arange(W),
indexing='ij')
# creates the grid of coordinates of the points of the image in the "deformation grid" frame of reference
coordinate_grid_0_to_n_points = np.meshgrid(np.linspace(0, self.n_points - 1, S),
np.linspace(0, self.n_points - 1, H),
np.linspace(0, self.n_points - 1, W),
indexing='ij')
# creates the deformation along each dimension and then add it to the coordinates
for i in range(len(voxel_coordinates)):
rand_displacements = np.random.randn(self.n_points, self.n_points, self.n_points) * self.sigma # creating the displacement at the control points
interp_displacements = map_coordinates(rand_displacements, coordinate_grid_0_to_n_points, order=self.order).reshape(image.shape)
voxel_coordinates[i] = np.add(voxel_coordinates[i], interp_displacements) # adding the displacement
image = map_coordinates(image, voxel_coordinates, order=self.order, mode='nearest').reshape(image.shape)
segmap = map_coordinates(segmap, voxel_coordinates, order=0, mode='nearest').reshape(segmap.shape)
return image, segmap
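# Hedged usage sketch for the grid-based elastic deformation described in the docstring
# above (added for illustration, not part of the original module; the volume shape is an
# assumption). Image and label volumes are warped with the same displacement field, the
# labels with order-0 interpolation so class ids stay intact.
#
#   import numpy as np
#   deform = ElasticDeformation3D(sigma=25, n_points=3, p=1.0)
#   volume = np.random.randn(16, 128, 128).astype(np.float32)   # (slices, H, W)
#   labels = np.random.randint(0, 3, size=(16, 128, 128))
#   volume_d, labels_d = deform((volume, labels))                # same shapes, deformed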
class RandomAffine:
def __init__(self, p=0.5, degrees=(30, 70), translate=(0.1, 0.3), scale=(0.5, 0.75)):
self.p = p
self.degrees = list(degrees if degrees is not None else [0,0])
self.translate = list(translate if translate is not None else [0,0])
self.scale = list(scale if scale is not None else [1,1])
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
image_size = F._get_image_size(image)
ret = transforms.RandomAffine.get_params(self.degrees, self.translate, self.scale, None, img_size=image_size)
image = F.affine(image, *ret, interpolation=InterpolationMode.BILINEAR)
segmap = F.affine(segmap, *ret, interpolation=InterpolationMode.NEAREST)
return image, segmap
class random_flips:
def __init__(self, p=0.5):
self.p = p
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
if torch.rand(1) < 0.5:
image, segmap = F.hflip(image), F.hflip(segmap)
if torch.rand(1) < 0.5:
image, segmap = F.vflip(image), F.vflip(segmap)
return image, segmap
class random_noise:
def __init__(self, p=0.5, std_factor=0.5):
self.p = p
self.std_factor = std_factor
def __call__(self, sample):
image, segmap = sample
if torch.rand(1) < self.p:
dtype = image.dtype
image = image.float()
image += torch.randn(image.shape) * self.std_factor * image.std()
image = image.to(dtype=dtype)
return image, segmap
class random_clip:
def __init__(self, min_interval=(-512, -511), max_interval=(512,513)):
self.min_interval = min_interval
self.max_interval = max_interval
def __call__(self, sample):
image, segmap = sample
min_v = self.min_interval if type(self.min_interval) == int else np.random.randint(*self.min_interval)
max_v = self.max_interval if type(self.max_interval) == int else np.random.randint(*self.max_interval)
image = image.clip(min_v, max_v)
return image, segmap
| 40.26484 | 162 | 0.622477 |
24cf2a4fe714808dc04514f957549e03f22236f1
| 906 |
py
|
Python
|
model/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
model/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
model/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
from .alexnet_s import AlexNet_S
from .alexnet import AlexNet
from .lenet import LeNet
from .vgg16 import VGG16
from .vgg11 import VGG11
from .vgg13 import VGG13
from .vgg8 import VGG8
from .vgg16_bn import VGG16BN
from .vgg11_bn import VGG11BN
from .vgg13_bn import VGG13BN
from .vgg8_bn import VGG8BN
from .resnet18 import ResNet18
from .resnet20 import ResNet20
from .resnet32 import ResNet32
from .resnet34 import ResNet34
from .resnet44 import ResNet44
from .resnet50 import ResNet50
from .resnet56 import ResNet56
from .densenet121 import DenseNet121
from .densenet161 import DenseNet161
from .densenet169 import DenseNet169
from .densenet201 import DenseNet201
from .mobilenetv2 import MobileNetV2
from .mobilenetv3_s import MobileNetV3_S
from .mobilenetv3_l import MobileNetV3_L
from .wrn16_8 import WRN16_8
from .wrn16_10 import WRN16_10
from .wrn22_8 import WRN22_8
from .wrn28_10 import WRN28_10
| 31.241379 | 40 | 0.84106 |
1eb684a13ac8ac1a10c9b4a00c8d3d7940b1529e
| 225 |
py
|
Python
|
Documentation/sh/conf.py
|
fergy/aplit_linux-5
|
a6ef4cb0e17e1eec9743c064e65f730c49765711
|
[
"MIT"
] | 34 |
2019-07-19T20:44:15.000Z
|
2022-03-07T12:09:00.000Z
|
Documentation/sh/conf.py
|
fergy/aplit_linux-5
|
a6ef4cb0e17e1eec9743c064e65f730c49765711
|
[
"MIT"
] | 5 |
2020-04-04T09:24:09.000Z
|
2020-04-19T12:33:55.000Z
|
Documentation/sh/conf.py
|
fergy/aplit_linux-5
|
a6ef4cb0e17e1eec9743c064e65f730c49765711
|
[
"MIT"
] | 30 |
2018-05-02T08:43:27.000Z
|
2022-01-23T03:25:54.000Z
|
# -*- coding: utf-8; mode: python -*-
project = "SuperH architecture implementation manual"
tags.add("subproject")
latex_documents = [
('index', 'sh.tex', project,
'The kernel development community', 'manual'),
]
| 20.454545 | 53 | 0.662222 |
4634fa34ca464bdd816928453a65eeb82335d91d
| 4,785 |
py
|
Python
|
sdk/python/pulumi_azure_nextgen/compute/latest/get_ssh_public_key.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31 |
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/compute/latest/get_ssh_public_key.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231 |
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/compute/latest/get_ssh_public_key.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4 |
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSshPublicKeyResult',
'AwaitableGetSshPublicKeyResult',
'get_ssh_public_key',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:compute:getSshPublicKey'.""", DeprecationWarning)
@pulumi.output_type
class GetSshPublicKeyResult:
"""
Specifies information about the SSH public key.
"""
def __init__(__self__, id=None, location=None, name=None, public_key=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if public_key and not isinstance(public_key, str):
raise TypeError("Expected argument 'public_key' to be a str")
pulumi.set(__self__, "public_key", public_key)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[str]:
"""
SSH public key used to authenticate to a virtual machine through ssh. If this property is not initially provided when the resource is created, the publicKey property will be populated when generateKeyPair is called. If the public key is provided upon resource creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetSshPublicKeyResult(GetSshPublicKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSshPublicKeyResult(
id=self.id,
location=self.location,
name=self.name,
public_key=self.public_key,
tags=self.tags,
type=self.type)
def get_ssh_public_key(resource_group_name: Optional[str] = None,
ssh_public_key_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSshPublicKeyResult:
"""
Specifies information about the SSH public key.
Latest API Version: 2020-12-01.
:param str resource_group_name: The name of the resource group.
:param str ssh_public_key_name: The name of the SSH public key.
"""
pulumi.log.warn("get_ssh_public_key is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:compute:getSshPublicKey'.")
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['sshPublicKeyName'] = ssh_public_key_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:compute/latest:getSshPublicKey', __args__, opts=opts, typ=GetSshPublicKeyResult).value
return AwaitableGetSshPublicKeyResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
public_key=__ret__.public_key,
tags=__ret__.tags,
type=__ret__.type)
| 35.444444 | 354 | 0.649321 |
188f9e7e39db76b22026b69a5b03af1fe05ae63b
| 2,239 |
py
|
Python
|
airbyte-cdk/python/unit_tests/test_exception_handler.py
|
kattos-aws/airbyte
|
cbcbab4a2399c08d8f66d1b693ac824c245ba3da
|
[
"MIT"
] | null | null | null |
airbyte-cdk/python/unit_tests/test_exception_handler.py
|
kattos-aws/airbyte
|
cbcbab4a2399c08d8f66d1b693ac824c245ba3da
|
[
"MIT"
] | null | null | null |
airbyte-cdk/python/unit_tests/test_exception_handler.py
|
kattos-aws/airbyte
|
cbcbab4a2399c08d8f66d1b693ac824c245ba3da
|
[
"MIT"
] | 1 |
2022-02-19T17:22:50.000Z
|
2022-02-19T17:22:50.000Z
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import subprocess
import sys
import pytest
from airbyte_cdk.models import AirbyteErrorTraceMessage, AirbyteLogMessage, AirbyteMessage, AirbyteTraceMessage
def test_uncaught_exception_handler():
cmd = "from airbyte_cdk.logger import init_logger; from airbyte_cdk.exception_handler import init_uncaught_exception_handler; logger = init_logger('airbyte'); init_uncaught_exception_handler(logger); raise 1"
exception_message = "exceptions must derive from BaseException"
exception_trace = (
"Traceback (most recent call last):\n"
' File "<string>", line 1, in <module>\n'
"TypeError: exceptions must derive from BaseException"
)
expected_log_message = AirbyteMessage(
type="LOG", log=AirbyteLogMessage(level="FATAL", message=f"{exception_message}\n{exception_trace}")
)
expected_trace_message = AirbyteMessage(
type="TRACE",
trace=AirbyteTraceMessage(
type="ERROR",
emitted_at=0.0,
error=AirbyteErrorTraceMessage(
failure_type="system_error",
message="Something went wrong in the connector. See the logs for more details.",
internal_message=exception_message,
stack_trace=f"{exception_trace}\n",
),
),
)
with pytest.raises(subprocess.CalledProcessError) as err:
subprocess.check_output([sys.executable, "-c", cmd], stderr=subprocess.STDOUT)
assert not err.value.stderr, "nothing on the stderr"
stdout_lines = err.value.output.decode("utf-8").strip().split("\n")
assert len(stdout_lines) == 2
log_output, trace_output = stdout_lines
out_log_message = AirbyteMessage.parse_obj(json.loads(log_output))
assert out_log_message == expected_log_message, "Log message should be emitted in expected form"
out_trace_message = AirbyteMessage.parse_obj(json.loads(trace_output))
assert out_trace_message.trace.emitted_at > 0
out_trace_message.trace.emitted_at = 0.0 # set a specific emitted_at value for testing
assert out_trace_message == expected_trace_message, "Trace message should be emitted in expected form"
| 38.603448 | 212 | 0.711925 |
a8d3725a4278d037976b23f7102fc34546cde65c
| 3,446 |
py
|
Python
|
efficientdet/tf2/label_util.py
|
sujitahirrao/automl
|
e82d92d9ccca72e54e4c85188345f110ca7dfc3c
|
[
"Apache-2.0"
] | 5,277 |
2020-03-12T23:09:47.000Z
|
2022-03-30T17:28:35.000Z
|
efficientdet/tf2/label_util.py
|
sujitahirrao/automl
|
e82d92d9ccca72e54e4c85188345f110ca7dfc3c
|
[
"Apache-2.0"
] | 988 |
2020-03-17T02:53:40.000Z
|
2022-03-17T19:34:10.000Z
|
efficientdet/tf2/label_util.py
|
sujitahirrao/automl
|
e82d92d9ccca72e54e4c85188345f110ca7dfc3c
|
[
"Apache-2.0"
] | 1,486 |
2020-03-14T05:15:22.000Z
|
2022-03-29T02:28:56.000Z
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A few predefined label id mapping."""
import tensorflow as tf
import yaml
import hparams_config
coco = {
# 0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush',
}
voc = {
# 0: 'background',
1: 'aeroplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'diningtable',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'pottedplant',
17: 'sheep',
18: 'sofa',
19: 'train',
20: 'tvmonitor',
}
waymo = {
# 0: 'background',
1: 'vehicle',
2: 'pedestrian',
3: 'cyclist',
}
def get_label_map(mapping):
"""Get label id map based on the name, filename, or dict."""
# case 1: if it is None or dict, just return it.
if not mapping or isinstance(mapping, dict):
return mapping
if isinstance(mapping, hparams_config.Config):
return mapping.as_dict()
# case 2: if it is a yaml file, load it to a dict and return the dict.
assert isinstance(mapping, str), 'mapping must be dict or str.'
if mapping.endswith('.yaml'):
with tf.io.gfile.GFile(mapping) as f:
return yaml.load(f, Loader=yaml.FullLoader)
# case 3: it is a name of a predefined dataset.
return {'coco': coco, 'voc': voc, 'waymo': waymo}[mapping]
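# Illustrative calls for get_label_map() above (comments only, not part of the original
# file; 'my_labels.yaml' is a hypothetical path). The literal results follow from the
# dictionaries defined in this module:
#
#   get_label_map('voc')[15]          # -> 'person'
#   get_label_map('waymo')[1]         # -> 'vehicle'
#   get_label_map({1: 'tumor'})       # dicts are passed through unchanged
#   get_label_map('my_labels.yaml')   # parsed with yaml and returned as a dict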
| 22.522876 | 80 | 0.548172 |
4431d58a979cd506b3b9d5eb55e3d1b655f75071
| 1,458 |
py
|
Python
|
cassiopeia/__init__.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | null | null | null |
cassiopeia/__init__.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | null | null | null |
cassiopeia/__init__.py
|
artemigkh/cassiopeia
|
fa78cb8f86ea21857916a707d04de6a05498033e
|
[
"MIT"
] | null | null | null |
# Initialize the settings singleton
from ._configuration import get_default_config, Settings, CassiopeiaConfiguration as _CassiopeiaConfiguration
configuration = _CassiopeiaConfiguration()
from .cassiopeia import get_realms, get_challenger_league, get_champion_masteries, get_champion, get_champion_mastery, get_champions, get_current_match, get_featured_matches, get_items, get_language_strings, get_locales, get_league_entries, get_leagues, get_maps, get_master_league, get_grandmaster_league, get_match, get_match_history, get_profile_icons, get_runes, get_status, get_summoner, get_summoner_spells, get_version, get_versions, get_champion_rotations, get_paginated_league_entries, get_verification_string
from .cassiopeia import apply_settings, set_riot_api_key, set_default_region, print_calls, _get_pipeline
from .core import Champion, Champions, Rune, Runes, Item, Items, SummonerSpell, SummonerSpells, ProfileIcon, ProfileIcons, Versions, Maps, Summoner, ChampionMastery, ChampionMasteries, Match, FeaturedMatches, ShardStatus, ChallengerLeague, GrandmasterLeague, MasterLeague, Map, Realms, LanguageStrings, Locales, LeagueEntries, League, Patch, VerificationString, MatchHistory, ChampionRotation, LeagueSummonerEntries, CurrentMatch
from .data import Queue, Region, Platform, Resource, Side, GameMode, MasteryTree, Tier, Division, Season, GameType, Lane, Role, Rank, Key, SummonersRiftArea, Tower, Position
apply_settings(configuration.settings)
| 132.545455 | 518 | 0.856653 |
9cfbe231b94a445ca1168300057ca68462ab39c5
| 3,672 |
py
|
Python
|
mi/dataset/driver/ctdpf_ckl/wfp/coastal_ctdpf_ckl_wfp_recovered_driver.py
|
cwingard/mi-instrument
|
bea2dedcf0633a24d62d875b99e7c11aa1ad7fe4
|
[
"BSD-2-Clause"
] | 1 |
2018-09-14T23:28:29.000Z
|
2018-09-14T23:28:29.000Z
|
mi/dataset/driver/ctdpf_ckl/wfp/coastal_ctdpf_ckl_wfp_recovered_driver.py
|
cwingard/mi-instrument
|
bea2dedcf0633a24d62d875b99e7c11aa1ad7fe4
|
[
"BSD-2-Clause"
] | 33 |
2017-04-25T19:53:45.000Z
|
2022-03-18T17:42:18.000Z
|
mi/dataset/driver/ctdpf_ckl/wfp/coastal_ctdpf_ckl_wfp_recovered_driver.py
|
cwingard/mi-instrument
|
bea2dedcf0633a24d62d875b99e7c11aa1ad7fe4
|
[
"BSD-2-Clause"
] | 31 |
2015-03-04T01:01:09.000Z
|
2020-10-28T14:42:12.000Z
|
#!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
import os
from mi.core.versioning import version
from mi.dataset.dataset_driver import SimpleDatasetDriver, ParticleDataHandler
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.ctdpf_ckl_wfp import CtdpfCklWfpParser, \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.wfp_c_file_common import WfpCFileCommonConfigKeys
from mi.dataset.parser.ctdpf_ckl_wfp_particles import \
CtdpfCklWfpRecoveredDataParticle, \
CtdpfCklWfpRecoveredMetadataParticle, \
CtdpfCklWfpDataParticleKey
from mi.dataset.driver.flort_kn.stc_imodem.flort_kn__stc_imodem_driver import FlortKnStcImodemDriver
from mi.core.log import get_logger
log = get_logger()
class CoastalCtdpfCklWfpRecoveredDriver(SimpleDatasetDriver):
"""
    Derived coastal ctdpf_ckl wfp recovered driver class
All this needs to do is create a concrete _build_parser method
"""
def __init__(self, unused, stream_handle, particle_data_handler, e_file_time_pressure_tuples):
self._e_file_time_pressure_tuples = e_file_time_pressure_tuples
super(CoastalCtdpfCklWfpRecoveredDriver, self).__init__(unused, stream_handle, particle_data_handler)
def _build_parser(self, stream_handle):
parser_config = {
WfpCFileCommonConfigKeys.PRESSURE_FIELD_C_FILE: CtdpfCklWfpDataParticleKey.PRESSURE,
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: CtdpfCklWfpRecoveredMetadataParticle,
DATA_PARTICLE_CLASS_KEY: CtdpfCklWfpRecoveredDataParticle
}
}
file_size = os.path.getsize(stream_handle.name)
parser = CtdpfCklWfpParser(parser_config,
stream_handle,
self._exception_callback,
file_size,
self._e_file_time_pressure_tuples)
return parser
@version("0.0.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
# Get the flort file name from the ctd file name
head, tail = os.path.split(source_file_path)
tail = tail.replace('C', 'E')
flort_source_file_path = os.path.join(head, tail)
# Parse the flort file to get a list of (time, pressure) tuples.
try:
flort_particle_data_handler = ParticleDataHandler()
with open(flort_source_file_path, 'rb') as flort_stream_handle:
driver = FlortKnStcImodemDriver(unused, flort_stream_handle, flort_particle_data_handler)
e_file_time_pressure_tuples = driver.get_time_pressure_tuples()
except Exception as e:
log.error(e)
return particle_data_handler
if not e_file_time_pressure_tuples:
log.error('Time-Pressure tuples not extracted from %s', flort_source_file_path)
return particle_data_handler
# Parse the ctd file and use the e_file_time_pressure_tuples to generate
# the internal timestamps of the particles
with open(source_file_path, 'rb') as stream_handle:
driver = CoastalCtdpfCklWfpRecoveredDriver(
unused, stream_handle, particle_data_handler, e_file_time_pressure_tuples)
driver.processFileStream()
return particle_data_handler
| 37.85567 | 109 | 0.729303 |
bf26baac998236e3c8d8f6e23d83f96f4db24ec6
| 1,652 |
py
|
Python
|
conf/config.py
|
ZeroyiQ/GoodGoodName
|
705c533acfe7953816dd81ea5532bb9be16ae557
|
[
"MIT"
] | 6 |
2020-12-24T17:41:05.000Z
|
2021-07-13T14:43:16.000Z
|
conf/config.py
|
ZeroyiQ/GoodGoodName
|
705c533acfe7953816dd81ea5532bb9be16ae557
|
[
"MIT"
] | null | null | null |
conf/config.py
|
ZeroyiQ/GoodGoodName
|
705c533acfe7953816dd81ea5532bb9be16ae557
|
[
"MIT"
] | 5 |
2020-10-31T03:55:29.000Z
|
2021-05-28T02:27:20.000Z
|
# Strongly recommended to change these
LAST_NAME = '于'  # Family name (surname)
SEX = '女'  # Child's sex: 男 (male) or 女 (female)
YEAR = 2020  # Birth time: year
MONTH = 12  # Birth time: month
DATE = 7  # Birth time: day
HOUR = 12  # Birth time: hour
MINUTE = 00  # Birth time: minute
# Change these as needed
MIN_SINGLE_NUM = 2  # Minimum stroke count allowed for a single character
MAX_SINGLE_NUM = 20  # Maximum stroke count allowed for a single character
THRESHOLD_SCORE = 85  # Lowest acceptable score in the sancai/wuge test; results are recorded in RESULT_FILE
SELECTED_XITONGSHEN = None  # Known favorable element (xi yong shen), or the secondary one; None means no preference. Look it up on the website yourself; optional, and setting it may rule out the best matches
# Better left alone unless you know what they mean
debug = False
my_write_num_list = [(7, 10)]  # Stroke-count results from the first test round; record them yourself
true_request = True  # Send real requests
# Character that must appear in the given name
fix_write_word = ''
SELECTED_SANCAI = ['大吉', '中吉']  # Accepted sancai grades ('大吉' = very auspicious, '中吉' = moderately auspicious); if None, don't insist on the best ones
# First submit the basic info at http://www.qimingzi.net/, click "start naming", then press F12 to inspect the request and copy the Cookie below
headers = { "Cookie": "__51cke__=; Hm_lvt_1f1b125fd1b03fdb6cac5abdd0f5d306=1603866097,1603882167; ASP.NET_SessionId=typwis2xwc1cm5kd2iehtskw; __tins__20674741=%7B%22sid%22%3A%201603952255714%2C%20%22vd%22%3A%203%2C%20%22expires%22%3A%201603954232685%7D; 53gid2=10308951834010; visitor_type=new; 53gid0=10308951834010; 53gid1=10308951834010; 53revisit=1603952432864; 53kf_72241622_from_host=www.qimingzi.net; 53kf_72241622_keyword=; 53kf_72241622_land_page=http%253A%252F%252Fwww.qimingzi.net%252FnameReport.aspx%253Fsurname%253D%2525D3%2525DA%2526name%253D%2525D7%2525CE%2525E8%2525F2%2526sex%253D%2525C5%2525AE; kf_72241622_land_page_ok=1; 53uvid=1; onliner_zdfq72241622=0; __tins__5033285=%7B%22sid%22%3A%201603952125189%2C%20%22vd%22%3A%2013%2C%20%22expires%22%3A%201603955633053%7D; __51laig__=19; Hm_lpvt_1f1b125fd1b03fdb6cac5abdd0f5d306=1603953833",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36" }
| 61.185185 | 855 | 0.780872 |
a17499d6c68e7ae2e331bfdd45c5eb78e46de4c9
| 2,093 |
py
|
Python
|
pysces/contrib/__init__.py
|
katrinleinweber/pysces
|
197e666b9d48b9dd2db8ca572041bacf8b84efc3
|
[
"BSD-3-Clause"
] | null | null | null |
pysces/contrib/__init__.py
|
katrinleinweber/pysces
|
197e666b9d48b9dd2db8ca572041bacf8b84efc3
|
[
"BSD-3-Clause"
] | null | null | null |
pysces/contrib/__init__.py
|
katrinleinweber/pysces
|
197e666b9d48b9dd2db8ca572041bacf8b84efc3
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2017 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier ([email protected])
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
__version__ = '0.4.0'
__doc__ = 'PySCeS contrib module loader'
import os
import pysces
tempdir = os.getcwd()
mod_path = os.path.join(pysces.install_dir,'contrib')
os.chdir(mod_path)
mod_dir_list = [dir for dir in os.listdir(mod_path) if os.path.isdir(dir)]
mod_load_list = []
mod_dict = {}
mod_os = os.name
#mod_os = 'posix' # transform a windows machine into a posix one
print '\nAdding contrib modules ...'
for mod in mod_dir_list:
status = 0
author = ''
email = ''
base = ''
web = ''
try:
exec('import ' + mod + ' as CurMod')
author = getattr(CurMod,'pysx_author')
email = getattr(CurMod,'pysx_email')
base = getattr(CurMod,'pysx_base_class')
web = getattr(CurMod,'pysx_web')
if mod_os in getattr(CurMod,'pysx_oscompat'):
print 'Including module \"' + mod + '\"'
if email != '':
print '\tAuthor(s): ' + author + ', (' + email + ')'
else:
print '\tAuthor(s): ' + author
if web != '':
print '\t' + web
status = 1
mod_load_list.append(mod)
else:
print '\t' + getattr(CurMod,'pysx_name') + ' only available on '\
+ str(getattr(CurMod,'pysx_oscompat'))
print '\tMaintainer ' + author + email
status = 0
del CurMod
except Exception, e:
print '\nModule ' + mod + ' *not* included'
print '\t',e,'\n'
mod_dict.update({mod:{}})
mod_dict[mod].update({'status':status,\
'path':os.path.abspath(os.path.join(mod_path,mod,'__init__.py')),\
'author':author,\
'email':email,\
'base':base})
print ' '
os.chdir(tempdir)
del pysces,os,tempdir
| 27.539474 | 86 | 0.672241 |
159afdac9f409c2d3a02a35d9e01a3e932c0e75e
| 549 |
py
|
Python
|
Chapter02/B06246_02_14-metadata.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 43 |
2017-03-27T18:58:26.000Z
|
2022-03-25T15:29:45.000Z
|
Chapter02/B06246_02_14-metadata.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 2 |
2018-07-02T09:23:47.000Z
|
2018-08-23T13:57:41.000Z
|
Chapter02/B06246_02_14-metadata.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 31 |
2017-03-08T06:37:22.000Z
|
2021-12-17T21:51:30.000Z
|
# Accessing layer metadata
lyr = QgsVectorLayer("/qgis_data/nyc/NYC_MUSEUMS_GEO.shp", "Museums", "ogr")
QgsMapLayerRegistry.instance().addMapLayers([lyr])
m = lyr.metadata()
lyr_cap = m.split("Capabilities of this layer</p>\n<p>")[1].split("<")[0].split(",")
lyr_cap = [x.strip() for x in lyr_cap]
# [u'Add Features', u'Delete Features', u'Change Attribute Values',
# u'Add Attributes', u'Delete Attributes', u'Rename Attributes',
# u'Create Spatial Index', u'Create Attribute Indexes',
# u'Fast Access to Features at ID', u'Change Geometries']
| 42.230769 | 84 | 0.715847 |