hexsha (stringlengths 40..40) | size (int64 1..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..239) | max_stars_repo_name (stringlengths 5..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (sequencelengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..239) | max_issues_repo_name (stringlengths 5..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (sequencelengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..239) | max_forks_repo_name (stringlengths 5..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (sequencelengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 1..1.03M) | avg_line_length (float64 1..958k) | max_line_length (int64 1..1.03M) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
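In the header above, ⌀ marks columns that can contain nulls. Below is a minimal, hedged sketch of how rows with this schema could be inspected; the `rows.jsonl` file name and the use of pandas are assumptions, not part of this dump.

```python
# Hedged sketch: inspect rows that follow the schema above.
# "rows.jsonl" is an assumed export of this table, not a file named in the dump.
import pandas as pd

rows = pd.read_json("rows.jsonl", lines=True)

# Each record pairs repo/file metadata with the full source text in "content".
python_rows = rows[rows["lang"] == "Python"]
for _, row in python_rows.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:120])  # first characters of the stored file
```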
794258f632c9efcfe981265f1e6c3bb9478a5291 | 913 | py | Python | core/mobile_devices/tasks.py | jcaguirre89/django-mobile-app | ca3a178603ba80d846d97bf7b1f8b0cfe259ea53 | [
"MIT"
] | 64 | 2017-10-06T21:56:22.000Z | 2022-03-26T07:35:44.000Z | core/mobile_devices/tasks.py | jcaguirre89/django-mobile-app | ca3a178603ba80d846d97bf7b1f8b0cfe259ea53 | [
"MIT"
] | 5 | 2018-02-16T12:17:35.000Z | 2019-03-28T12:35:54.000Z | core/mobile_devices/tasks.py | jcaguirre89/django-mobile-app | ca3a178603ba80d846d97bf7b1f8b0cfe259ea53 | [
"MIT"
] | 13 | 2017-12-20T21:51:05.000Z | 2021-02-02T18:03:34.000Z |
import boto3
import logging
from django.conf import settings
from celery import shared_task
@shared_task
def register_device_on_sns(device):
"""
    Registers your device on AWS SNS and attaches the ARN endpoint to the device object.
The ARN endpoint is used when publishing push notifications.
:param device: your device object, extending the AbstractMobileDevice.
:return: -
"""
try:
client = boto3.client('sns', region_name=settings.AWS_REGION)
platform_arn = settings.AWS_IOS_APPLICATION_ARN if device.is_ios else settings.AWS_ANDROID_APPLICATION_ARN
response = client.create_platform_endpoint(
PlatformApplicationArn=platform_arn,
Token=device.push_token,
)
endpoint_arn = response.get('EndpointArn')
device.arn_endpoint = endpoint_arn
device.save()
except Exception as e:
logging.error(e)
| 33.814815 | 114 | 0.714129 |
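A brief, hedged usage sketch for the `register_device_on_sns` task above; it assumes a configured Celery broker and a saved device instance extending `AbstractMobileDevice`.

```python
# Hedged usage sketch (assumes a configured Celery broker and a saved device
# instance extending AbstractMobileDevice).
from core.mobile_devices.tasks import register_device_on_sns

def enqueue_sns_registration(device):
    # .delay() queues the shared_task; the worker then creates the SNS platform
    # endpoint and stores the resulting ARN on the device.
    register_device_on_sns.delay(device)
```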
79425c0a7e0ac0fbc71bf38112a35bf8c87242ae | 10,377 | py | Python | src/local/butler/py_unittest.py | fengjixuchui/clusterfuzz | ef89be3934936d1086b4a21bffca5506c8cb93be | [
"Apache-2.0"
] | null | null | null | src/local/butler/py_unittest.py | fengjixuchui/clusterfuzz | ef89be3934936d1086b4a21bffca5506c8cb93be | [
"Apache-2.0"
] | null | null | null | src/local/butler/py_unittest.py | fengjixuchui/clusterfuzz | ef89be3934936d1086b4a21bffca5506c8cb93be | [
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""py_unittest.py runs tests under src/appengine and butler/tests"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
from builtins import range
import coverage
# Coverage needs to be at the top of the page. See: go/why-top-cov
COV = coverage.Coverage(config_file='.coveragerc')
COV.start()
import io
import itertools
import logging
import multiprocessing
import os
import platform
import signal
import sys
import time
import traceback
import unittest
from local.butler import appengine
from local.butler import common
from src.python.config import local_config
from src.python.metrics import logs
APPENGINE_TEST_DIRECTORY = os.path.join('src', 'python', 'tests', 'appengine')
CORE_TEST_DIRECTORY = os.path.join('src', 'python', 'tests', 'core')
SLOW_TEST_THRESHOLD = 2 # In seconds.
class TrackedTestResult(unittest.TextTestResult):
"""Result object that tracks slow-running tests."""
def __init__(self, *args, **kwargs):
super(TrackedTestResult, self).__init__(*args, **kwargs)
self.slow_tests = []
def startTest(self, test):
self._start_time = time.time()
super(TrackedTestResult, self).startTest(test)
def addSuccess(self, test):
elapsed_time = time.time() - self._start_time
super(TrackedTestResult, self).addSuccess(test)
if elapsed_time <= SLOW_TEST_THRESHOLD:
return
description = self.getDescription(test).splitlines()[0]
self.slow_tests.append((elapsed_time, description))
class TrackedTestRunner(unittest.TextTestRunner):
"""TextTestRunner wrapper that reports additional information we collect."""
def __init__(self, *args, **kwargs):
kwargs['resultclass'] = TrackedTestResult
super(TrackedTestRunner, self).__init__(*args, **kwargs)
def run(self, test):
result = super(TrackedTestRunner, self).run(test)
if not result.slow_tests:
return result
self.stream.writeln('\nSlow tests:')
for elapsed_time, test_name in sorted(result.slow_tests, reverse=True):
print('%6.2fs: %s' % (elapsed_time, test_name))
return result
class MeasureCoverage(object):
"""Use with `with` statement for measuring test coverage."""
def __init__(self, enabled):
self.enabled = enabled
def __enter__(self):
pass
def __exit__(self, exc_type, value, _):
COV.stop()
if not self.enabled:
return
COV.html_report(directory='coverage')
print('The tests cover %0.2f%% of the source code.' %
COV.report(file=io.BytesIO()))
print('The test coverage by lines can be seen at ./coverage/index.html')
class TestResult(object):
"""Test results."""
def __init__(self, output, num_errors, num_failures, num_skipped, total_run):
self.output = output
self.num_errors = num_errors
self.num_failures = num_failures
self.num_skipped = num_skipped
self.total_run = total_run
def test_worker_init():
"""Initialise test worker process."""
if platform.system() != 'Windows':
# Prevent KeyboardInterrupt error output.
signal.signal(signal.SIGINT, signal.SIG_IGN)
def run_one_test_parallel(args):
"""Test worker."""
try:
os.environ['PARALLEL_TESTS'] = '1'
test_modules, suppress_output = args
suite = unittest.loader.TestLoader().loadTestsFromNames(test_modules)
# We use BufferedWriter as a hack to accept both unicode and str write
# arguments.
stream = io.BufferedWriter(io.BytesIO())
# Verbosity=0 since we cannot see real-time test execution order when tests
# are executed in parallel.
result = unittest.TextTestRunner(
stream=stream, verbosity=0, buffer=suppress_output).run(suite)
stream.flush()
return TestResult(stream.raw.getvalue(), len(result.errors),
len(result.failures), len(result.skipped),
result.testsRun)
except BaseException:
# Print exception traceback here, as it will be lost otherwise.
traceback.print_exc()
raise
def run_tests_single_core(args, test_directory, top_level_dir, enable_coverage):
"""Run tests (single CPU)."""
suites = unittest.loader.TestLoader().discover(
test_directory, pattern=args.pattern, top_level_dir=top_level_dir)
with MeasureCoverage(enable_coverage):
# Verbosity=2 since we want to see real-time test execution with test name
# and result.
result = TrackedTestRunner(
verbosity=2, buffer=(not args.unsuppress_output)).run(suites)
if result.errors or result.failures:
sys.exit(1)
def run_tests_parallel(args, test_directory, top_level_dir):
"""Run tests (multiple CPUs)."""
suites = unittest.loader.TestLoader().discover(
test_directory, pattern=args.pattern, top_level_dir=top_level_dir)
test_classes = [] # pylint: disable=protected-access
for suite in suites:
for subsuite in suite._tests: # pylint: disable=protected-access
# According to:
# https://github.com/python/cpython/blob/2.7/Lib/unittest/loader.py#L24,
# this is how we can get a ModuleImportFailure error.
if subsuite.__class__.__name__ == 'ModuleImportFailure':
unittest.TextTestRunner(verbosity=1).run(subsuite)
raise Exception('A failure occurred while importing the module.')
else:
for test_class in subsuite._tests: # pylint: disable=protected-access
test_classes.append((test_class.__module__,
test_class.__class__.__name__))
test_classes = sorted(test_classes)
test_modules = []
for module_path, _ in itertools.groupby(test_classes, key=lambda k: k[0]):
test_modules.append(module_path)
test_modules = sorted(test_modules)
cpu_count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpu_count, test_worker_init)
total_result = TestResult('', 0, 0, 0, 0)
# partition tests
test_args = []
tests_per_cpu = max(1, len(test_modules) // cpu_count)
for i in range(0, len(test_modules), tests_per_cpu):
group = test_modules[i:i + tests_per_cpu]
test_args.append((group, not args.unsuppress_output))
results = pool.map_async(run_one_test_parallel, test_args)
while True:
try:
# KeyboardInterrupt never gets raised unless we pass a timeout.
results = results.get(timeout=600)
break
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.exit(1)
pool.close()
pool.join()
for result in results:
if result.num_failures or result.num_errors:
print(result.output)
total_result.num_errors += result.num_errors
total_result.num_failures += result.num_failures
total_result.num_skipped += result.num_skipped
total_result.total_run += result.total_run
print('Ran %d tests (%d skipped, %d errors, %d failures).' %
(total_result.total_run, total_result.num_skipped,
total_result.num_errors, total_result.num_failures))
if total_result.num_errors or total_result.num_failures:
sys.exit(1)
def execute(args):
"""Run Python unit tests. For unittests involved appengine, sys.path needs
certain modification."""
os.environ['PY_UNITTESTS'] = 'True'
if os.getenv('INTEGRATION') or os.getenv('UNTRUSTED_RUNNER_TESTS'):
# Set up per-user buckets used by integration tests.
os.environ['CORPUS_BUCKET'] = common.test_bucket('TEST_CORPUS_BUCKET')
os.environ['QUARANTINE_BUCKET'] = common.test_bucket(
'TEST_QUARANTINE_BUCKET')
os.environ['BACKUP_BUCKET'] = common.test_bucket('TEST_BACKUP_BUCKET')
os.environ['COVERAGE_BUCKET'] = common.test_bucket('TEST_COVERAGE_BUCKET')
# Kill leftover instances of emulators and dev appserver.
common.kill_leftover_emulators()
# Don't use absolute paths to make it easier to compare results in tests.
os.environ['CONFIG_DIR_OVERRIDE'] = os.path.join('.', 'configs', 'test')
top_level_dir = os.path.join('src', 'python')
if args.target == 'appengine':
# Build template files.
appengine.build_templates()
test_directory = APPENGINE_TEST_DIRECTORY
sys.path.insert(0, os.path.abspath(os.path.join('src', 'appengine')))
# Get additional App Engine third party imports.
import dev_appserver
sys.path.extend(dev_appserver.EXTRA_PATHS)
# Loading appengine_config from the current project ensures that any
# changes to configuration there are available to all tests (e.g.
# sys.path modifications, namespaces, etc.)
try:
from src.appengine import appengine_config
(appengine_config) # pylint: disable=pointless-statement
except ImportError:
print('Note: unable to import appengine_config.')
elif args.target == 'core':
test_directory = CORE_TEST_DIRECTORY
else:
# Config module tests.
os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir
test_directory = os.path.join(args.config_dir, 'modules')
top_level_dir = None
# Modules may use libs from our App Engine directory.
sys.path.insert(0, os.path.abspath(os.path.join('src', 'appengine')))
# Fix paths again to get config modules added to the import path.
from python.base import modules
modules.fix_module_search_paths()
# Set expected environment variables.
local_config.ProjectConfig().set_environment()
# Needed for NDB to work with cloud datastore emulator.
os.environ['DATASTORE_USE_PROJECT_ID_AS_APP_ID'] = 'true'
if args.verbose:
logs.configure_for_tests()
else:
# Disable logging.
logging.disable(logging.CRITICAL)
enable_coverage = args.pattern is None
if args.pattern is None:
args.pattern = '*_test.py'
if args.parallel:
# TODO(tanin): Support coverage.
run_tests_parallel(args, test_directory, top_level_dir)
else:
run_tests_single_core(args, test_directory, top_level_dir, enable_coverage)
| 32.735016 | 80 | 0.722078 |
79425ccfd1f75058d331c5d59c095c8d537889f4 | 3,801 | py | Python | blinker-rel-0.8/tests/test_saferef.py | xinnjie/reading_source_code | a0c7965c33849608f1e9eeb6e175cc7a2866006c | [
"MIT"
] | null | null | null | blinker-rel-0.8/tests/test_saferef.py | xinnjie/reading_source_code | a0c7965c33849608f1e9eeb6e175cc7a2866006c | [
"MIT"
] | null | null | null | blinker-rel-0.8/tests/test_saferef.py | xinnjie/reading_source_code | a0c7965c33849608f1e9eeb6e175cc7a2866006c | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# extracted from Louie, http://pylouie.org/
# updated for Python 3
#
# Copyright (c) 2006 Patrick K. O'Brien, Mike C. Fletcher,
# Matthew R. Scott
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from blinker._saferef import safe_ref
class _Sample1(object):
def x(self):
pass
def _sample2(obj):
pass
class _Sample3(object):
def __call__(self, obj):
pass
class TestSaferef(unittest.TestCase):
# XXX: The original tests had a test for closure, and it had an
# off-by-one problem, perhaps due to scope issues. It has been
# removed from this test suite.
def setUp(self):
ts = []
ss = []
for x in range(100):
t = _Sample1()
ts.append(t)
s = safe_ref(t.x, self._closure)
ss.append(s)
ts.append(_sample2)
ss.append(safe_ref(_sample2, self._closure))
for x in range(30):
t = _Sample3()
ts.append(t)
s = safe_ref(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closure_count = 0
def tearDown(self):
if hasattr(self, 'ts'):
del self.ts
if hasattr(self, 'ss'):
del self.ss
def test_In(self):
"""Test the `in` operator for safe references (cmp)"""
for t in self.ts[:50]:
assert safe_ref(t.x) in self.ss
def test_Valid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
assert s()
def test_ShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
                assert safe_ref(t.x) in sd
            else:
                assert safe_ref(t) in sd
def test_Representation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closure_count += 1
| 31.675 | 76 | 0.637201 |
79425dd3a6b516ee7f77df48a7408fa785f6bb50 | 220 | py | Python | script/data_handler/DummyDataset.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/data_handler/DummyDataset.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/data_handler/DummyDataset.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null |
from script.data_handler.Base.BaseDataset import BaseDataset
class DummyDataset(BaseDataset):
def load(self, path):
pass
def save(self):
pass
def transform(self):
pass
| 16.923077 | 61 | 0.618182 |
79425eaee4e0a674b6eac56fe055f75ab788686d | 5,835 | py | Python | src/data/dataClean.py | ReubenGitHub/MachineLearning-Vehicle-Emissions | 5a6d5366d15cb918de5464c48e0067efceda4149 | [
"Apache-2.0"
] | null | null | null | src/data/dataClean.py | ReubenGitHub/MachineLearning-Vehicle-Emissions | 5a6d5366d15cb918de5464c48e0067efceda4149 | [
"Apache-2.0"
] | null | null | null | src/data/dataClean.py | ReubenGitHub/MachineLearning-Vehicle-Emissions | 5a6d5366d15cb918de5464c48e0067efceda4149 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Reuben Owen-Williams
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mysql.connector
from mysql.connector import Error
from dbLogin import configprivate
import pandas as pd
import numpy as np
"""
Manipulates the "uk_gov_data" table to produce both sparse ("uk_gov_data_sparse") and dense ("uk_gov_data_dense") tables to address the points identified in dataInitialiseAnalyse.py.
"""
def create_database_connection(host_name, user_name, user_password, database):
"""
Returns a connection to the database "vehicles" in the local MySQL server.
"""
connection = None
try:
connection = mysql.connector.connect(
host=host_name,
user=user_name,
passwd=user_password,
database=database
)
print("MySQL Database connection successful")
except Error as err:
print(f"Error: '{err}'")
return connection
def create_table(connection, query):
"""
Creates a table in the "vehicles" database in the local MySQL server.
"""
cursor = connection.cursor()
try:
cursor.execute(query)
print("Table created successfully")
except Error as err:
print(f"Error: '{err}'")
def insert_table(connection, query, df):
"""
Performs queries, e.g. INSERT, in the "vehicles" database.
"""
cursor = connection.cursor()
try:
for i in range(0, df.shape[0]):
cursor.execute(query, tuple(df.iloc[i].values.flatten().tolist()))
connection.commit()
print("Table edited successfully")
except Error as err:
print(f"Error: '{err}'")
def main():
"""
Manipulates the "uk_gov_data" table to produce both sparse ("uk_gov_data_sparse") and dense ("uk_gov_data_dense") tables to address the points identified in dataInitialiseAnalyse.py.
"""
connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles")
# Read the UK gov data from the "vehicles" database using pandas. Convert "car_id" from int64 (a numpy type) to float as MySQL cannot convert:
# https://stackoverflow.com/questions/56731036/interfaceerror-failed-executing-the-operation-python-type-numpy-int64-cannot-b
govData = pd.read_sql("SELECT * FROM uk_gov_data", connection)
govData = govData.astype(dtype = {"car_id": float}, copy=True)
# Create the table "uk_gov_data_sparse".
create_govtablesparse_query = """
USE vehicles;
CREATE TABLE uk_gov_data_sparse LIKE uk_gov_data;
"""
create_table(connection, create_govtablesparse_query)
# (4) Replace "Electric - Not Applicable" in "transmission_type" with "Automatic" when "fuel" = "Petrol".
govData.loc[(govData["fuel"] == "Petrol")&(govData["transmission_type"] == "Electric - Not Applicable"),"transmission_type"] = "Automatic"
# (7) Replace "powertrain" and "co2_emission_gPERkm" when "model" = "Evoque, 20MY" and "powertrain" = "Hybrid Electric Vehicle (HEV)".
indices = govData[ (govData["powertrain"]=="Hybrid Electric Vehicle (HEV)") & (govData["model"]=="Evoque, 20MY") ].index
govData.loc[indices,"powertrain"] = "Plug-in Hybrid Electric Vehicle (PHEV)"
govData.loc[indices[0],"co2_emissions_gPERkm"] = 32
govData.loc[indices[1],"co2_emissions_gPERkm"] = 38
# (7) Replace "co2_emissions_gPERkm" with "22" when "description" = "RAV4 Design 2.5 Plug-in Hybrid".
govData.loc[govData["description"] == "RAV4 Design 2.5 Plug-in Hybrid","co2_emissions_gPERkm"] = 22
# Populate the (relatively speaking) sparse table "uk_gov_data_sparse".
connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles")
govDataSparseImport = govData.replace({np.nan: None}, inplace=False)
query = """INSERT INTO uk_gov_data_sparse VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
insert_table(connection, query, govDataSparseImport)
# Save this cleaned sparse data as a csv to "data\intermediate".
govDataSparseImport.to_csv('./data/intermediate/uk_gov_data_sparse.csv', index=False, encoding="ISO-8859-1")
# (1) Now to create the dense data set, replace nulls in "transmission" with "Auto".
govData["transmission"].replace({np.nan: "Auto"}, inplace=True)
# (2) Replace nulls in "engine_size_cm3" with 0.
govData["engine_size_cm3"].replace({np.nan: 0}, inplace=True)
# (3) Replace nulls in "power_ps" with 0.
govData["power_ps"].replace({np.nan: 0}, inplace=True)
# Create the table "uk_gov_data_dense".
create_govtabledense_query = """
USE vehicles;
CREATE TABLE uk_gov_data_dense LIKE uk_gov_data;
"""
create_table(connection, create_govtabledense_query)
# Populate the dense table "uk_gov_data_dense".
connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles")
govDataDenseImport = govData
query = """INSERT INTO uk_gov_data_dense VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
insert_table(connection, query, govDataDenseImport)
# Save this cleaned dense data as a csv to "data\intermediate".
govDataDenseImport.to_csv('./data/intermediate/uk_gov_data_dense.csv', index=False, encoding="ISO-8859-1")
main()
 | 45.232558 | 186 | 0.696315 |
7942606e49d3a33460e5c37e3f853860b425dc88 | 2,335 | py | Python | library/regenerate_shadow.py | ashithwilson/deepslate | 13e1ce8e5cdcf2df8932387f98fdec55d39345d7 | [
"Unlicense",
"MIT"
] | null | null | null | library/regenerate_shadow.py | ashithwilson/deepslate | 13e1ce8e5cdcf2df8932387f98fdec55d39345d7 | [
"Unlicense",
"MIT"
] | null | null | null | library/regenerate_shadow.py | ashithwilson/deepslate | 13e1ce8e5cdcf2df8932387f98fdec55d39345d7 | [
"Unlicense",
"MIT"
] | null | null | null |
#To regenerate missing shadow file entries: this will re-add missing shadow entries with default values (customers need to reset the password for the missing emails). Use it only as a last resort.
#warning: This will re-add missing shadow entries with a default value.
import os,argparse
import subprocess
from os.path import expanduser
parser=argparse.ArgumentParser(
description="To regenerate shadow file with a deafult value. This will only create shadow entry for missing email address with default value. so user to reset his password to use his passord"
)
parser.add_argument('user', help="cPanel user name")
args=parser.parse_args()
home=expanduser("~"+args.user)
dir=os.listdir(home+"/etc")
str=[]
pt=[]
print("\033[92m")
for i in dir:
try:
with open(home+"/etc/"+i+"/shadow","r") as file:
for k in file:
str.append(k.split(':')[0]+"@"+i)
#print(str)
except:
pass
for i in os.listdir(home+"/mail"):
if '@' in i:
pt.append(i.split('.')[1].replace('_','.'))
#with open("test","r") as file2:
# for kj in file2:
# pt.append(kj.split()[0])
#print(pt)
print("====================")
print("Enteries in mail directory: \n")
print(pt)
print("Enteries in shadow file : \n")
print(str)
new_array=(list(set(pt)-set(str)))
if len(new_array) == 0:
print("\033[0m")
print("Enteries in mail directories is subset of shadow enteries : Nothing to do here")
quit()
print("\033[93m")
print("The shadow entry of following accounts are missing")
print("=====================")
print("\033[0m")
for i in new_array:
print i
#print(new_array "\n")
print("\033[93m")
txt = raw_input("Do you need to proceed with restoration: (yes|no) ")
print("\033[0m")
if txt == "yes":
print("Backing up all shadow files")
os.chdir(home)
subprocess.call("t=$RANDOM;for i in $(find . -iname 'shadow');do cp $i $i'_backup_'$t;done",shell=True)
for i in new_array:
print(i)
r=i.split('@')[0]
m=i.split('@')[1]
print(r+"\n"+m)
with open(home+"/etc/"+m+"/shadow","ab+") as file:
file.write(r+":\$6\$roottn\$lCukmfCJGtLN.vP9WSQlpcTSNYNHKz81YAmbxW/iuZ7cZD4AYt7AjnX.FR1F/lC2SSM3P5hfQsM811Qgk85iN/:16249:::::")
file.write("\n")
elif txt == "no":
quit()
| 35.378788 | 195 | 0.626552 |
7942607698102324d3ff9f7d60cbdcf8ff5560f3 | 1,339 | py | Python | python/rest-client-samples/ocr/ocr_vat_invoice.py | 798000648/ais-sdk | 6831d8fa02143b856481706998a0eb9278da2ccd | [
"Apache-2.0"
] | 1 | 2020-06-06T08:52:47.000Z | 2020-06-06T08:52:47.000Z | python/rest-client-samples/ocr/ocr_vat_invoice.py | 798000648/ais-sdk | 6831d8fa02143b856481706998a0eb9278da2ccd | [
"Apache-2.0"
] | null | null | null | python/rest-client-samples/ocr/ocr_vat_invoice.py | 798000648/ais-sdk | 6831d8fa02143b856481706998a0eb9278da2ccd | [
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import urllib2
import json
import ssl
from urllib2 import HTTPError, URLError
#
# access ocr vat invoice
#
def ocr_vat_invoice(token, url):
_url = 'https://ais.cn-north-1.myhuaweicloud.com/v1.0/ocr/vat-invoice'
_data = {
"image":"",
"url":url,
"side":"front"
}
kreq = urllib2.Request( url = _url)
kreq.add_header('Content-Type', 'application/json')
kreq.add_header('X-Auth-Token', token )
kreq.add_data(json.dumps(_data))
resp = None
status_code = None
try:
#
# Here we use the unvertified-ssl-context, Because in FunctionStage
# the client CA-validation have some problem, so we must do this.
#
_context = ssl._create_unverified_context()
r = urllib2.urlopen(kreq, context=_context)
#
# We use HTTPError and URLError,because urllib2 can't process the 4XX &
# 500 error in the single urlopen function.
#
# If you use a modern, high-level designed HTTP client lib, Yeah, I mean requests,
# there is no this problem.
#
except HTTPError, e:
resp = e.read()
status_code = e.code
except URLError, e:
resp = e.read()
status_code = e.code
else:
status_code = r.code
resp = r.read()
return resp
| 25.75 | 87 | 0.606423 |
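A short, hedged call sketch for `ocr_vat_invoice` above; the token and image URL are placeholders only.

```python
# Hedged usage sketch for ocr_vat_invoice(); both values below are placeholders.
token = 'your-iam-token'
result = ocr_vat_invoice(token, 'https://example.com/vat-invoice.jpg')
print(result)
```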
794260790bcff798aa15a387de233b603d7cf358 | 3,213 | py | Python | Sprites-Full/Animales/movAnimales.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null | Sprites-Full/Animales/movAnimales.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null | Sprites-Full/Animales/movAnimales.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null |
import pygame
import math
ANCHO=640
ALTO=480
def mostrarPos():
pos=pygame.mouse.get_pos()
return pos
if __name__ == '__main__':
pygame.init()
    pantalla=pygame.display.set_mode([ANCHO, ALTO]) #Create the window
    #Load the image into a variable
fondo=pygame.image.load('animals.png')
infoFondo=fondo.get_rect()
print infoFondo
ancho_imagen=infoFondo[2]
alto_imagen=infoFondo[3]
print "Ancho = ",ancho_imagen
print "Alto = ",alto_imagen
print 'Funciona'
fin=False
pos_x=0
pos_y=0
i=0
alto_corte=alto_imagen/8
ancho_corte=ancho_imagen/12
x=0
y=0
movR=[]
movL=[]
movU=[]
movD=[]
posGato=[50,50]
dir='R'
    #Width: 30, height: 40
for i in range(3):
cuadro=fondo.subsurface(i*ancho_corte,2*alto_corte,ancho_corte, alto_corte)
movR.append(cuadro)
for i in range(3):
cuadro=fondo.subsurface(i*ancho_corte,1*alto_corte,ancho_corte, alto_corte)
movL.append(cuadro)
for i in range(3):
cuadro=fondo.subsurface(i*ancho_corte,3*alto_corte,ancho_corte, alto_corte)
movU.append(cuadro)
for i in range(3):
cuadro=fondo.subsurface(i*ancho_corte,0*alto_corte,ancho_corte, alto_corte)
movD.append(cuadro)
pantalla.blit(movR[0], posGato)
pygame.display.flip()
reloj=pygame.time.Clock()
while not fin:
for event in pygame.event.get():
if event.type == pygame.QUIT:
fin=True
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
dir='L'
var_x=-2
posGato[0]+=var_x
pantalla.fill([0,0,0])
pantalla.blit(movL[i], posGato)
pygame.display.flip()
i+=1
if i>=3:
i=0
if event.key==pygame.K_RIGHT:
dir='R'
var_x=2
posGato[0]+=var_x
pantalla.fill([0,0,0])
pantalla.blit(movR[i], posGato)
pygame.display.flip()
i+=1
if i>=3:
i=0
if event.key==pygame.K_UP:
dir='U'
var_y=-2
posGato[1]+=var_y
pantalla.fill([0,0,0])
pantalla.blit(movU[i], posGato)
pygame.display.flip()
i+=1
if i>=3:
i=0
if event.key==pygame.K_DOWN:
dir='D'
var_y=+2
posGato[1]+=var_y
pantalla.fill([0,0,0])
pantalla.blit(movD[i], posGato)
pygame.display.flip()
i+=1
if i>=3:
i=0
if event.type==pygame.KEYUP:
                #Reset the movement deltas
var_x=0
var_y=0
#pantalla.blit(movR[i], posGato)
pygame.display.flip()
reloj.tick(15)
| 23.625 | 83 | 0.472144 |
794260f71d0968c9aabe4a8ae7e4642663d07bba | 1,172 | py | Python | 04-spark/MaxTemperatures.py | raphaeldeaquino/pos-ia-bd | 112ee6c685cce1234ee732682fad9bd8733c3b29 | [
"Apache-2.0"
] | null | null | null | 04-spark/MaxTemperatures.py | raphaeldeaquino/pos-ia-bd | 112ee6c685cce1234ee732682fad9bd8733c3b29 | [
"Apache-2.0"
] | null | null | null | 04-spark/MaxTemperatures.py | raphaeldeaquino/pos-ia-bd | 112ee6c685cce1234ee732682fad9bd8733c3b29 | [
"Apache-2.0"
] | null | null | null |
import findspark
findspark.init()
from pyspark.sql import SparkSession
def parse_line(line):
fields = line.split(',')
station_id = fields[0]
entry_type = fields[2]
temperature = float(fields[3]) * 0.1 * (9.0 / 5.0) + 32.0
return station_id, entry_type, temperature
# Find the maximum temperature by weather station
if __name__ == "__main__":
spark = (SparkSession
.builder
.appName("MaxTemperatures")
.getOrCreate())
# Read each line of input data
lines = spark.sparkContext.textFile("data/1800.csv")
# Convert to (stationID, entryType, temperature) tuples
parsedLines = lines.map(parse_line)
# Filter out all but TMAX entries
maxTemps = parsedLines.filter(lambda x: "TMAX" in x[1])
# Convert to (stationID, temperature)
stationTemps = maxTemps.map(lambda x: (x[0], x[2]))
    # Reduce by stationID retaining the maximum temperature found
maxTemps = stationTemps.reduceByKey(lambda x, y: max(x, y))
# Collect, format, and print the results
results = maxTemps.collect()
for result in results:
print(result[0] + "\t{:.2f}F".format(result[1]))
| 27.904762 | 65 | 0.659556 |
794261002360745a1a7384904c6f9efdc2e74445 | 2,267 | py | Python | config/urls.py | Sinha-Ujjawal/django-starter | 042a50c917fa9123dd899cf6aafaf916e23e4521 | [
"MIT"
] | null | null | null | config/urls.py | Sinha-Ujjawal/django-starter | 042a50c917fa9123dd899cf6aafaf916e23e4521 | [
"MIT"
] | null | null | null | config/urls.py | Sinha-Ujjawal/django-starter | 042a50c917fa9123dd899cf6aafaf916e23e4521 | [
"MIT"
] | null | null | null | """your_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, URLPattern
from django.urls.conf import include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="your_project API",
default_version="v1.0.0",
description="your_project is a ...", # TODO
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(url="https://github.com/Sinha-Ujjawal"),
# license=openapi.License(name="MIT License"), TODO
),
public=True,
permission_classes=(permissions.AllowAny,),
)
def path_with_base(base_url: str):
"""Returns a function that adds a given prefix to all the paths generated from
returned function
"""
def _inner(route: str, *args, **kwargs) -> URLPattern:
return path(f"{base_url}/{route}", *args, **kwargs)
return _inner
base_path = path_with_base("your_project")
urlpatterns = [
base_path(
"playground/",
schema_view.with_ui("swagger", cache_timeout=0),
name="schema-swagger-ui",
),
base_path(
"docs/",
schema_view.with_ui("redoc", cache_timeout=0),
name="schema-redoc",
),
base_path("admin/", admin.site.urls),
base_path("auth/", include("your_project.authentication.urls")),
base_path("users/", include("your_project.users.urls")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 31.054795 | 82 | 0.692545 |
794261ecea91ffdd00c1a5198786b24922fea59b | 4,176 | py | Python | venidium/types/spend_bundle.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | 7 | 2021-06-29T22:23:55.000Z | 2022-02-09T04:32:46.000Z | venidium/types/spend_bundle.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | 2 | 2021-09-13T03:23:59.000Z | 2022-01-12T20:20:27.000Z | venidium/types/spend_bundle.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | null | null | null |
import dataclasses
import warnings
from dataclasses import dataclass
from typing import List
from blspy import AugSchemeMPL, G2Element
from venidium.consensus.default_constants import DEFAULT_CONSTANTS
from venidium.types.blockchain_format.coin import Coin
from venidium.types.blockchain_format.sized_bytes import bytes32
from venidium.util.streamable import Streamable, dataclass_from_dict, recurse_jsonify, streamable
from venidium.wallet.util.debug_spend_bundle import debug_spend_bundle
from .coin_spend import CoinSpend
@dataclass(frozen=True)
@streamable
class SpendBundle(Streamable):
"""
This is a list of coins being spent along with their solution programs, and a single
aggregated signature. This is the object that most closely corresponds to a bitcoin
transaction (although because of non-interactive signature aggregation, the boundaries
between transactions are more flexible than in bitcoin).
"""
coin_spends: List[CoinSpend]
aggregated_signature: G2Element
@property
def coin_solutions(self):
return self.coin_spends
@classmethod
def aggregate(cls, spend_bundles) -> "SpendBundle":
coin_spends: List[CoinSpend] = []
sigs: List[G2Element] = []
for bundle in spend_bundles:
coin_spends += bundle.coin_spends
sigs.append(bundle.aggregated_signature)
aggregated_signature = AugSchemeMPL.aggregate(sigs)
return cls(coin_spends, aggregated_signature)
def additions(self) -> List[Coin]:
items: List[Coin] = []
for coin_spend in self.coin_spends:
items.extend(coin_spend.additions())
return items
def removals(self) -> List[Coin]:
"""This should be used only by wallet"""
return [_.coin for _ in self.coin_spends]
def fees(self) -> int:
"""Unsafe to use for fees validation!!!"""
amount_in = sum(_.amount for _ in self.removals())
amount_out = sum(_.amount for _ in self.additions())
return amount_in - amount_out
def name(self) -> bytes32:
return self.get_hash()
def debug(self, agg_sig_additional_data=DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA):
debug_spend_bundle(self, agg_sig_additional_data)
def not_ephemeral_additions(self):
all_removals = self.removals()
all_additions = self.additions()
result: List[Coin] = []
for add in all_additions:
if add in all_removals:
continue
result.append(add)
return result
# Note that `coin_spends` used to have the bad name `coin_solutions`.
# Some API still expects this name. For now, we accept both names.
#
# TODO: continue this deprecation. Eventually, all code below here should be removed.
# 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)
# 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)
# 3. remove all references to `include_legacy_keys=True`
# 4. remove all code below this point
@classmethod
def from_json_dict(cls, json_dict):
if "coin_solutions" in json_dict:
if "coin_spends" not in json_dict:
json_dict = dict(
aggregated_signature=json_dict["aggregated_signature"], coin_spends=json_dict["coin_solutions"]
)
warnings.warn("`coin_solutions` is now `coin_spends` in `SpendBundle.from_json_dict`")
else:
raise ValueError("JSON contains both `coin_solutions` and `coin_spends`, just use `coin_spends`")
return dataclass_from_dict(cls, json_dict)
def to_json_dict(self, include_legacy_keys: bool = True, exclude_modern_keys: bool = True):
if include_legacy_keys is False and exclude_modern_keys is True:
raise ValueError("`coin_spends` not included in legacy or modern outputs")
d = dataclasses.asdict(self)
if include_legacy_keys:
d["coin_solutions"] = d["coin_spends"]
if exclude_modern_keys:
del d["coin_spends"]
return recurse_jsonify(d)
| 37.963636 | 115 | 0.688218 |
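For illustration, a hedged sketch of combining bundles with the `SpendBundle.aggregate()` classmethod shown above; the two input bundles are assumed to already exist.

```python
# Hedged sketch: merge two existing SpendBundle objects. aggregate() collects
# their coin spends and BLS-aggregates their signatures.
from venidium.types.spend_bundle import SpendBundle

def combine_bundles(bundle_a: SpendBundle, bundle_b: SpendBundle) -> SpendBundle:
    combined = SpendBundle.aggregate([bundle_a, bundle_b])
    assert len(combined.coin_spends) == len(bundle_a.coin_spends) + len(bundle_b.coin_spends)
    return combined
```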
7942626d8c83fb226f9701569741c2ca19a8f0dc | 666 | py | Python | manage.py | cebanauskes/ida_images | 708eb44274b28d53f9b0422fbf3711d85ac62a6b | [
"MIT"
] | null | null | null | manage.py | cebanauskes/ida_images | 708eb44274b28d53f9b0422fbf3711d85ac62a6b | [
"MIT"
] | null | null | null | manage.py | cebanauskes/ida_images | 708eb44274b28d53f9b0422fbf3711d85ac62a6b | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ida_images.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.956522 | 74 | 0.68018 |
7942634272079fc9638d55a79f254b5e7ffb0c86 | 14,972 | py | Python | djangae/db/backends/appengine/indexing.py | martinogden/djangae | 22610a636556c98a68200ebbeb6f1f57da42d617 | [
"BSD-3-Clause"
] | null | null | null | djangae/db/backends/appengine/indexing.py | martinogden/djangae | 22610a636556c98a68200ebbeb6f1f57da42d617 | [
"BSD-3-Clause"
] | null | null | null | djangae/db/backends/appengine/indexing.py | martinogden/djangae | 22610a636556c98a68200ebbeb6f1f57da42d617 | [
"BSD-3-Clause"
] | null | null | null |
import logging
import yaml
import os
import datetime
import re
from djangae.sandbox import allow_mode_write
from django.conf import settings
_special_indexes = {}
_last_loaded_time = None
MAX_COLUMNS_PER_SPECIAL_INDEX = getattr(settings, "DJANGAE_MAX_COLUMNS_PER_SPECIAL_INDEX", 3)
CHARACTERS_PER_COLUMN = [31, 44, 54, 63, 71, 79, 85, 91, 97, 103]
def _get_index_file():
from djangae.utils import find_project_root
index_file = os.path.join(find_project_root(), "djangaeidx.yaml")
return index_file
def _get_table_from_model(model_class):
return model_class._meta.db_table.encode("utf-8")
def load_special_indexes():
global _special_indexes
global _last_loaded_time
index_file = _get_index_file()
if not os.path.exists(index_file):
# No file, no special index
logging.debug("Not loading any special indexes")
return
mtime = os.path.getmtime(index_file)
if _last_loaded_time and _last_loaded_time == mtime:
return
# Load any existing indexes
with open(index_file, "r") as stream:
data = yaml.load(stream)
_special_indexes = data
_last_loaded_time = mtime
logging.debug("Loaded special indexes for {0} models".format(len(_special_indexes)))
def special_index_exists(model_class, field_name, index_type):
table = _get_table_from_model(model_class)
return index_type in _special_indexes.get(table, {}).get(field_name, [])
def special_indexes_for_model(model_class):
classes = [ model_class ] + model_class._meta.parents.keys()
result = {}
for klass in classes:
result.update(_special_indexes.get(_get_table_from_model(klass), {}))
return result
def special_indexes_for_column(model_class, column):
return special_indexes_for_model(model_class).get(column, [])
def write_special_indexes():
index_file = _get_index_file()
with allow_mode_write():
with open(index_file, "w") as stream:
stream.write(yaml.dump(_special_indexes))
def add_special_index(model_class, field_name, index_type, value=None):
from djangae.utils import on_production, in_testing
from django.conf import settings
indexer = REQUIRES_SPECIAL_INDEXES[index_type]
index_type = indexer.prepare_index_type(index_type, value)
field_name = field_name.encode("utf-8") # Make sure we are working with strings
load_special_indexes()
if special_index_exists(model_class, field_name, index_type):
return
if on_production() or (in_testing() and not getattr(settings, "GENERATE_SPECIAL_INDEXES_DURING_TESTING", False)):
raise RuntimeError(
"There is a missing index in your djangaeidx.yaml - \n\n{0}:\n\t{1}: [{2}]".format(
_get_table_from_model(model_class), field_name, index_type
)
)
_special_indexes.setdefault(
_get_table_from_model(model_class), {}
).setdefault(field_name, []).append(str(index_type))
write_special_indexes()
class Indexer(object):
def validate_can_be_indexed(self, value, negated):
"""Return True if the value is indexable, False otherwise"""
raise NotImplementedError()
def prep_value_for_database(self, value, index): raise NotImplementedError()
def prep_value_for_query(self, value): raise NotImplementedError()
def indexed_column_name(self, field_column, value, index): raise NotImplementedError()
def prep_query_operator(self, op): return "exact"
def prepare_index_type(self, index_type, value): return index_type
def unescape(self, value):
value = value.replace("\\_", "_")
value = value.replace("\\%", "%")
value = value.replace("\\\\", "\\")
return value
class IExactIndexer(Indexer):
def validate_can_be_indexed(self, value, negated):
return len(value) < 500
def prep_value_for_database(self, value, index):
if value is None:
return None
if isinstance(value, (int, long)):
value = str(value)
return value.lower()
def prep_value_for_query(self, value):
return value.lower()
def indexed_column_name(self, field_column, value, index):
return "_idx_iexact_{0}".format(field_column)
class DayIndexer(Indexer):
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index):
if value:
return value.day
return None
def prep_value_for_query(self, value):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.day
def indexed_column_name(self, field_column, value, index):
return "_idx_day_{0}".format(field_column)
class YearIndexer(Indexer):
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index):
if value:
return value.year
return None
def prep_value_for_query(self, value):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.year
def indexed_column_name(self, field_column, value, index):
return "_idx_year_{0}".format(field_column)
class MonthIndexer(Indexer):
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index):
if value:
return value.month
return None
def prep_value_for_query(self, value):
if isinstance(value, (int, long)):
return value
if isinstance(value, basestring):
value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return value.month
def indexed_column_name(self, field_column, value, index):
return "_idx_month_{0}".format(field_column)
class WeekDayIndexer(Indexer):
def validate_can_be_indexed(self, value, negated):
return isinstance(value, (datetime.datetime, datetime.date))
def prep_value_for_database(self, value, index):
if value:
zero_based_weekday = value.weekday()
if zero_based_weekday == 6: # Sunday
return 1 # Django treats the week as starting at Sunday, but 1 based
else:
return zero_based_weekday + 2
return None
def prep_value_for_query(self, value):
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_week_day_{0}".format(field_column)
class ContainsIndexer(Indexer):
def number_of_permutations(self, value):
return sum(range(len(value)+1))
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) <= 500
def prep_value_for_database(self, value, index):
result = []
if value:
# If this a date or a datetime, or something that supports isoformat, then use that
if hasattr(value, "isoformat"):
value = value.isoformat()
if self.number_of_permutations(value) > MAX_COLUMNS_PER_SPECIAL_INDEX*500:
raise ValueError("Can't index for contains query, this value is too long and has too many permutations. \
You can increase the DJANGAE_MAX_COLUMNS_PER_SPECIAL_INDEX setting to fix that. Use with caution.")
if len(value) > CHARACTERS_PER_COLUMN[-1]:
raise ValueError("Can't index for contains query, this value can be maximum {0} characters long.".format(CHARACTERS_PER_COLUMN[-1]))
length = len(value)
result = list(set([value[i:j + 1] for i in xrange(length) for j in xrange(i, length)]))
return result or None
def prep_value_for_query(self, value):
if hasattr(value, "isoformat"):
value = value.isoformat()
else:
value = unicode(value)
value = self.unescape(value)
if value.startswith("%") and value.endswith("%"):
value = value[1:-1]
return value
def indexed_column_name(self, field_column, value, index):
# This we use when we actually query to return the right field for a given
# value length
length = len(value)
column_number = 0
for x in CHARACTERS_PER_COLUMN:
if length > x:
column_number += 1
return "_idx_contains_{0}_{1}".format(field_column, column_number)
class IContainsIndexer(ContainsIndexer):
def prep_value_for_database(self, value, index):
if value is None:
return None
result = super(IContainsIndexer, self).prep_value_for_database(value.lower(), index)
return result if result else None
def indexed_column_name(self, field_column, value, index):
column_name = super(IContainsIndexer, self).indexed_column_name(field_column, value, index)
return column_name.replace('_idx_contains_', '_idx_icontains_')
def prep_value_for_query(self, value):
return super(IContainsIndexer, self).prep_value_for_query(value).lower()
class EndsWithIndexer(Indexer):
"""
dbindexer originally reversed the string and did a startswith on it.
However, this is problematic as it uses an inequality and therefore
limits the queries you can perform. Instead, we store all permutations
of the last characters in a list field. Then we can just do an exact lookup on
the value. Which isn't as nice, but is more flexible.
"""
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) < 500
def prep_value_for_database(self, value, index):
results = []
for i in xrange(len(value)):
results.append(value[i:])
return results or None
def prep_value_for_query(self, value):
value = self.unescape(value)
if value.startswith("%"):
value = value[1:]
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_endswith_{0}".format(field_column)
class IEndsWithIndexer(EndsWithIndexer):
"""
Same as above, just all lower cased
"""
def prep_value_for_database(self, value, index):
if value is None:
return None
result = super(IEndsWithIndexer, self).prep_value_for_database(value.lower(), index)
return result or None
def prep_value_for_query(self, value):
return super(IEndsWithIndexer, self).prep_value_for_query(value.lower())
def indexed_column_name(self, field_column, value, index):
return "_idx_iendswith_{0}".format(field_column)
class StartsWithIndexer(Indexer):
"""
Although we can do a startswith natively, doing it this way allows us to
use more queries (E.g. we save an exclude)
"""
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, basestring) and len(value) < 500
def prep_value_for_database(self, value, index):
if isinstance(value, datetime.datetime):
value = value.strftime("%Y-%m-%d %H:%M:%S")
results = []
for i in xrange(1, len(value) + 1):
results.append(value[:i])
if not results:
return None
return results
def prep_value_for_query(self, value):
value = self.unescape(value)
if value.endswith("%"):
value = value[:-1]
return value
def indexed_column_name(self, field_column, value, index):
return "_idx_startswith_{0}".format(field_column)
class IStartsWithIndexer(StartsWithIndexer):
"""
Same as above, just all lower cased
"""
def prep_value_for_database(self, value, index):
return super(IStartsWithIndexer, self).prep_value_for_database(value.lower(), index)
def prep_value_for_query(self, value):
return super(IStartsWithIndexer, self).prep_value_for_query(value.lower())
def indexed_column_name(self, field_column, value, index):
return "_idx_istartswith_{0}".format(field_column)
class RegexIndexer(Indexer):
def prepare_index_type(self, index_type, value):
"""
If we're dealing with RegexIndexer, we create a new index for each
regex pattern. Indexes are called regex__pattern.
"""
return 'regex__{}'.format(value.encode("utf-8").encode('hex'))
def validate_can_be_indexed(self, value, negated):
if negated:
return False
return isinstance(value, bool)
def get_pattern(self, index):
try:
return index.split('__')[1].decode('hex').decode("utf-8")
except IndexError:
return ''
def check_if_match(self, value, index, flags=0):
pattern = self.get_pattern(index)
if value:
if hasattr(value, '__iter__'): # is a list, tuple or set?
if any([bool(re.search(pattern, x, flags)) for x in value]):
return True
else:
if isinstance(value, (int, long)):
value = str(value)
return bool(re.search(pattern, value, flags))
return False
def prep_value_for_database(self, value, index):
return self.check_if_match(value, index)
def prep_value_for_query(self, value):
return True
def indexed_column_name(self, field_column, value, index):
return "_idx_regex_{0}_{1}".format(field_column, self.get_pattern(index).encode("utf-8").encode('hex'))
class IRegexIndexer(RegexIndexer):
def prepare_index_type(self, index_type, value):
return 'iregex__{}'.format(value.encode('hex'))
def prep_value_for_database(self, value, index):
return self.check_if_match(value, index, flags=re.IGNORECASE)
def indexed_column_name(self, field_column, value, index):
return "_idx_iregex_{0}_{1}".format(field_column, self.get_pattern(index).encode('hex'))
REQUIRES_SPECIAL_INDEXES = {
"iexact": IExactIndexer(),
"contains": ContainsIndexer(),
"icontains": IContainsIndexer(),
"day" : DayIndexer(),
"month" : MonthIndexer(),
"year": YearIndexer(),
"week_day": WeekDayIndexer(),
"endswith": EndsWithIndexer(),
"iendswith": IEndsWithIndexer(),
"startswith": StartsWithIndexer(),
"istartswith": IStartsWithIndexer(),
"regex": RegexIndexer(),
"iregex": IRegexIndexer(),
}
| 32.689956 | 148 | 0.661435 |
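To make the djangaeidx.yaml structure referenced above concrete, a hedged illustration of the in-memory mapping that `load_special_indexes()` builds; the table and field names here are invented examples.

```python
# Hedged illustration: shape of _special_indexes after load_special_indexes().
# Keys are db_table names, then field names, then lists of index types.
# "myapp_person", "name" and "created" are invented examples.
_example_special_indexes = {
    "myapp_person": {
        "name": ["iexact", "icontains"],
        "created": ["year", "month"],
    },
}
```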
7942635ae16530fe848af3ddbf212ce93ea61086 | 853 | py | Python | quickstartup/qs_pages/models.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 13 | 2015-06-10T03:29:15.000Z | 2021-10-01T22:06:48.000Z | quickstartup/qs_pages/models.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 47 | 2015-06-10T03:26:18.000Z | 2021-09-22T17:35:24.000Z | quickstartup/qs_pages/models.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 3 | 2015-07-07T23:55:39.000Z | 2020-04-18T10:34:53.000Z | from django.db import models
from django.template import loader
from django.utils.translation import gettext_lazy as _
class Page(models.Model):
slug = models.SlugField(max_length=255, blank=True, unique=True, db_index=True,
help_text=_("URL Path. Example: about for /about/"))
template_name = models.CharField(max_length=255, help_text=_("Template filename. Example: website/about.html"))
login_required = models.BooleanField(default=False)
@property
def path(self):
return "/{}/".format(self.slug) if self.slug else "/"
def __str__(self):
return self.path
def __repr__(self):
return "<Page: {}>".format(self.path)
def get_absolute_url(self):
return self.path
@property
def template(self):
return loader.get_template(self.template_name)
| 30.464286 | 115 | 0.675264 |
7942637a79580286b2f7d45b6ad6cab483ba70f3 | 674 | py | Python | retinaface/pre_trained_models.py | LiveFly/retinaface | 939ae81bbf4ae9333fa14743962dcf99d8db1840 | [
"MIT"
] | 1 | 2020-11-05T11:20:16.000Z | 2020-11-05T11:20:16.000Z | retinaface/pre_trained_models.py | LiveFly/retinaface | 939ae81bbf4ae9333fa14743962dcf99d8db1840 | [
"MIT"
] | null | null | null | retinaface/pre_trained_models.py | LiveFly/retinaface | 939ae81bbf4ae9333fa14743962dcf99d8db1840 | [
"MIT"
] | null | null | null |
from collections import namedtuple
from torch.utils import model_zoo
from retinaface.predict_single import Model
model = namedtuple("model", ["url", "model"])
models = {
"resnet50_2020-07-20": model(
url="https://github.com/ternaus/retinaface/releases/download/0.01/retinaface_resnet50_2020-07-20-f168fae3c.zip", # noqa: E501
model=Model,
)
}
def get_model(model_name: str, max_size: int, device: str = "cpu") -> Model:
model = models[model_name].model(max_size=max_size, device=device)
state_dict = model_zoo.load_url(models[model_name].url, progress=True, map_location="cpu")
model.load_state_dict(state_dict)
return model
| 28.083333 | 134 | 0.722552 |
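A hedged usage sketch for `get_model()` above; the max_size and device values are arbitrary examples.

```python
# Hedged usage sketch: load the pretrained RetinaFace model registered above.
from retinaface.pre_trained_models import get_model

model = get_model("resnet50_2020-07-20", max_size=1024, device="cpu")
model.eval()  # the returned Model loads a torch state dict above, so eval() is assumed available
```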
794263b863b93679c3d1a78a91ae69bbc5fad4b9 | 1,874 | py | Python | caproto/tests/test_create_many_channels.py | mattclarke/caproto | 4c3912cfde270f90f75f8c8ee5868e9da1a5095a | [
"BSD-3-Clause"
] | 4 | 2017-02-06T17:46:50.000Z | 2017-04-28T18:08:23.000Z | caproto/tests/test_create_many_channels.py | mattclarke/caproto | 4c3912cfde270f90f75f8c8ee5868e9da1a5095a | [
"BSD-3-Clause"
] | 65 | 2017-02-11T17:23:23.000Z | 2018-03-14T00:03:11.000Z | caproto/tests/test_create_many_channels.py | mattclarke/caproto | 4c3912cfde270f90f75f8c8ee5868e9da1a5095a | [
"BSD-3-Clause"
] | 2 | 2017-02-11T04:21:55.000Z | 2017-03-08T18:47:23.000Z |
import logging
import pytest
import curio
import caproto as ca
from .conftest import default_setup_module as setup_module # noqa
from .conftest import default_teardown_module as teardown_module # noqa
from . import conftest
@pytest.mark.parametrize('backend', ['curio', 'trio'])
def test_create_many_channels(ioc, backend):
logging.getLogger('caproto.{}.client'.format(backend)).setLevel('DEBUG')
async def client_test(context):
if context is None:
context = await conftest.get_curio_context()
return await context.create_many_channels(*pvnames,
wait_for_connection=True)
pvnames = list(ioc.pvs.values())
if backend == 'curio':
channels = curio.run(client_test, None)
elif backend == 'trio':
channels = conftest.run_with_trio_context(client_test)
print('got channels:', channels)
connected_channels = [ch for ch in channels.values()
if ch.channel.states[ca.CLIENT] is ca.CONNECTED]
assert len(connected_channels) == len(pvnames)
print('done')
@pytest.mark.parametrize('backend', ['curio', 'trio'])
def test_create_many_channels_with_bad_pv(ioc, backend):
async def client_test(context):
if context is None:
context = await conftest.get_curio_context()
return await context.create_many_channels(*pvnames,
wait_for_connection=True,
move_on_after=2)
pvnames = list(ioc.pvs.values()) + ['_NONEXISTENT_PVNAME_']
if backend == 'curio':
channels = curio.run(client_test, None)
elif backend == 'trio':
channels = conftest.run_with_trio_context(client_test)
assert '_NONEXISTENT_PVNAME_' not in channels
assert len(channels) == len(pvnames) - 1
| 35.358491 | 76 | 0.645678 |
794263de5d51313dcc44eb9c8469f00dbf079907 | 4,242 | py | Python | src/emotion/utils/file_reading.py | Thanatoz-1/EmotionStimuli | f7774cf77ec2a66949949905ed70d62117179666 | [
"BSD-3-Clause"
] | null | null | null | src/emotion/utils/file_reading.py | Thanatoz-1/EmotionStimuli | f7774cf77ec2a66949949905ed70d62117179666 | [
"BSD-3-Clause"
] | 2 | 2021-07-28T10:38:03.000Z | 2021-07-31T10:37:09.000Z | src/emotion/utils/file_reading.py | Thanatoz-1/EmotionStimuli | f7774cf77ec2a66949949905ed70d62117179666 | [
"BSD-3-Clause"
] | null | null | null | __author__ = "Maximilian Wegge"
import random, json, copy
class Data:
"""The Data object stores the unaltered data from a file
and performs preprocessing steps such as splitting into
subsets and converting the annotations' format.
"""
def __init__(
self,
filename: str,
roles: list = ["cause", "cue", "experiencer", "target"],
corpora: list = ["eca", "emotion-stimulus", "reman", "gne"],
splits: list = [1],
) -> None:
"""Initialize the Data object. Read data from file and split
it into subsets.
Args:
filename (str): name of file containing the data.
roles (list, optional): Specifies which emotion roles
to read from the file. Defaults to ["cause", "cue", "experiencer", "target"].
corpora (list, optional): Specifies which corpus/corpora
to load from the file. Defaults to ["eca", "emotion-stimulus", "reman", "gne"].
splits (list, optional): Specifies the size of subsets the data is split into.
Defaults to [1].
"""
self.data = []
self.splits = splits # metadata: amount/size of subsets.
self.split_data = []
self.ReadFile(filename, roles, corpora)
self.SplitData()
def ReadFile(self, filename: str, allow_roles: list, allow_corpora: list) -> None:
"""Load relevant data from file and store it in Data object.
Args:
filename (str): name of file containing the data.
allow_roles (list): Specifies which emotion roles to read from the file.
If there are no annotations for the given emotion roles, annotations of
only 'O' are created.
allow_corpora ([type]): Specifies which corpus/corpora to load from file.
"""
self.data.clear()
with open(filename, "r") as file:
all_data = json.load(file)
for instance in all_data:
if instance["dataset"] in allow_corpora:
relevant_annots = {}
for role in allow_roles:
if role in instance["annotations"]:
relevant_annots[role] = instance["annotations"][role]
else:
relevant_annots[role] = len(instance["tokens"]) * ["O"]
instance["annotations"] = relevant_annots
self.data.append(instance)
else:
pass
return None
def SplitData(self) -> None:
"""Split the data loaded from file into subsets and store
these subsets in the Data object.
"""
self.split_data.clear()
# to preserve the original order of the data,
# shuffle a copy of the data only.
cpy_data = copy.deepcopy(self.data)
random.seed(10)
random.shuffle(cpy_data)
not_split = copy.deepcopy(cpy_data)
for splt in self.splits:
splt_point = int(splt * len(cpy_data))
self.split_data.append(not_split[:splt_point])
not_split = not_split[splt_point:]
return None
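    # Illustrative example (assumed numbers, not from any corpus): with
    # splits=[0.8, 0.2] and 10 loaded instances, splt_point is int(0.8 * 10) = 8,
    # so split_data holds a shuffled subset of 8 instances followed by one of 2.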
def conv2brown(self):
"""Convert the format of each annotation to the format of the brown corpus:
[
(this, "O"),
("is", "O"),
("a", "B),
("sentence", "I"),
(".", ".")
]
"""
        # The unaltered data is preserved, as only the annotations
# contained in the subsets are converted.
for splt in self.split_data:
for instance in splt:
tokens = instance["tokens"]
orig = instance["annotations"]
brown = {}
for label in orig:
brown[label] = []
for tup in zip(tokens, orig[label]):
# Set tag for full stop (".") to "."
# (necessary for training and predicting).
if tup[0] == ".":
brown[label].append((tup[0], "."))
else:
brown[label].append((tup[0].lower(), tup[1]))
instance["annotations"] = brown
| 36.568966 | 91 | 0.537011 |
794264835aaf13d9ef67f64a411711bd736edc8e | 5,065 | py | Python | broqer/op/filter_.py | semiversus/python-broqer | 131a78b4e475c4134bc32e035b833c8b162cdff2 | [
"MIT"
] | 74 | 2018-04-13T11:29:16.000Z | 2021-05-08T17:55:13.000Z | broqer/op/filter_.py | semiversus/python-broqer | 131a78b4e475c4134bc32e035b833c8b162cdff2 | [
"MIT"
] | 36 | 2018-06-13T04:00:12.000Z | 2022-03-01T12:13:38.000Z | broqer/op/filter_.py | semiversus/python-broqer | 131a78b4e475c4134bc32e035b833c8b162cdff2 | [
"MIT"
] | 6 | 2019-04-17T17:33:11.000Z | 2021-05-08T17:55:32.000Z | """
Filters values based on a ``predicate`` function
Usage:
>>> from broqer import Value, op, Sink
>>> s = Value()
>>> filtered_publisher = s | op.Filter(lambda v:v>0)
>>> _disposable = filtered_publisher.subscribe(Sink(print))
>>> s.emit(1)
1
>>> s.emit(-1)
>>> s.emit(0)
>>> _disposable.dispose()
Also possible with additional args and kwargs:
>>> import operator
>>> filtered_publisher = s | op.Filter(operator.and_, 0x01)
>>> _disposable = filtered_publisher.subscribe(Sink(print))
>>> s.emit(100)
>>> s.emit(101)
101
"""
from functools import partial, wraps
from typing import Any, Callable
from broqer import NONE, Publisher
from broqer.operator import Operator
class Filter(Operator):
""" Filter object applied to publisher
:param predicate: function to evaluate the filtering
:param \\*args: variable arguments to be used for evaluating predicate
:param unpack: value from emits will be unpacked (\\*value)
:param \\*\\*kwargs: keyword arguments to be used for evaluating predicate
"""
def __init__(self, predicate: Callable[[Any], bool], *args,
unpack: bool = False, **kwargs) -> None:
Operator.__init__(self)
self._predicate = partial(predicate, *args, **kwargs) # type: Callable
self._unpack = unpack
def get(self) -> Any:
if self._originator is None:
raise ValueError('Operator is missing originator')
if self._subscriptions:
return self._state
value = self._originator.get() # type: Any
if self._unpack:
# assert isinstance(value, (list, tuple))
if self._predicate(*value):
return value
elif self._predicate(value):
return value
return NONE
def emit(self, value: Any, who: Publisher) -> None:
if who is not self._originator:
raise ValueError('Emit from non assigned publisher')
if self._unpack:
if self._predicate(*value):
return Publisher.notify(self, value)
elif self._predicate(value):
return Publisher.notify(self, value)
return None
class EvalTrue(Operator):
""" Emits all values which evaluates for True.
    This operator can be used in the pipeline style (v | EvalTrue()) or as
standalone operation (EvalTrue(v)).
"""
def __init__(self, publisher: Publisher = None) -> None:
Operator.__init__(self)
self._originator = publisher
def get(self) -> Any:
if self._subscriptions:
return self._state
assert isinstance(self._originator, Publisher)
value = self._originator.get() # type: Any
if bool(value):
return value
return NONE
def emit(self, value: Any, who: Publisher) -> None:
if who is not self._originator:
raise ValueError('Emit from non assigned publisher')
if bool(value):
return Publisher.notify(self, value)
return None
class EvalFalse(Operator):
""" Filters all emits which evaluates for False.
    This operator can be used in the pipeline style (v | EvalFalse()) or as
standalone operation (EvalFalse(v))."""
def __init__(self, publisher: Publisher = None) -> None:
Operator.__init__(self)
self._originator = publisher
def get(self) -> Any:
if self._subscriptions:
return self._state
assert isinstance(self._originator, Publisher)
value = self._originator.get() # type: Any
if not bool(value):
return value
return NONE
def emit(self, value: Any, who: Publisher) -> None:
if who is not self._originator:
raise ValueError('Emit from non assigned publisher')
if not bool(value):
return Publisher.notify(self, value)
return None
def build_filter(predicate: Callable[[Any], bool] = None, *,
unpack: bool = False):
""" Decorator to wrap a function to return a Filter operator.
:param function: function to be wrapped
:param unpack: value from emits will be unpacked (*value)
"""
def _build_filter(predicate):
return Filter(predicate, unpack=unpack)
if predicate:
return _build_filter(predicate)
return _build_filter
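# Hedged usage sketch for the decorator form (the names below are examples, not
# part of this module):
#   >>> @build_filter
#   ... def positive(value):
#   ...     return value > 0
#   >>> filtered = Value() | positive  # `positive` is now a Filter operator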
def build_filter_factory(predicate: Callable[[Any], bool] = None, *,
unpack: bool = False):
""" Decorator to wrap a function to return a factory for Filter operators.
:param predicate: function to be wrapped
:param unpack: value from emits will be unpacked (*value)
"""
def _build_filter(predicate: Callable[[Any], bool]):
@wraps(predicate)
def _wrapper(*args, **kwargs) -> Filter:
if 'unpack' in kwargs:
raise TypeError('"unpack" has to be defined by decorator')
return Filter(predicate, *args, unpack=unpack, **kwargs)
return _wrapper
if predicate:
return _build_filter(predicate)
return _build_filter
| 28.455056 | 79 | 0.630405 |
7942649ac9d35aaa97e88de5fd8c110df328853b | 1,565 | py | Python | core-site/other-pages/blog-posts/0-projects/calendar-this-solution/calendar-this/solution/app/forms.py | Web-Dev-Collaborative/Web-Dev-Hub | 16ad7cfbeb97b31c8510605033b91d75b63e527e | [
"MIT"
] | null | null | null | core-site/other-pages/blog-posts/0-projects/calendar-this-solution/calendar-this/solution/app/forms.py | Web-Dev-Collaborative/Web-Dev-Hub | 16ad7cfbeb97b31c8510605033b91d75b63e527e | [
"MIT"
] | null | null | null | core-site/other-pages/blog-posts/0-projects/calendar-this-solution/calendar-this/solution/app/forms.py | Web-Dev-Collaborative/Web-Dev-Hub | 16ad7cfbeb97b31c8510605033b91d75b63e527e | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from flask_wtf import FlaskForm
from wtforms.fields import (
BooleanField, DateField, StringField, SubmitField, TextAreaField, TimeField
)
from wtforms.widgets.html5 import DateInput, TimeInput
from wtforms.validators import DataRequired, ValidationError
def next_block(delta=0):
def time():
now = datetime.now()
return now - timedelta(minutes=now.minute % 15 - delta - 15,
seconds=now.second)
return time
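# Worked example (illustrative): if `now` is 10:07:30 then now.minute % 15 == 7,
# so the callable returned by next_block() yields 10:15:00 and next_block(60)
# yields 11:15:00, i.e. the next quarter-hour boundary shifted by `delta` minutes.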
v = [DataRequired()]
di = {'default': datetime.now, 'widget': DateInput()}
sti = {'default': next_block(), 'widget': TimeInput()}
eti = {'default': next_block(60), 'widget': TimeInput()}
class AppointmentForm(FlaskForm):
name = StringField("Name", v)
start_date = DateField("Start date", v, **di)
start_time = TimeField("Start time", v, **sti)
end_date = DateField("End date", v, **di)
end_time = TimeField("End time", v, **eti)
description = TextAreaField("Description", v)
private = BooleanField("Private?")
submit = SubmitField("Create an appointment")
def validate_end_date(form, field):
start = datetime.combine(form.start_date.data, form.start_time.data)
end = datetime.combine(field.data, form.end_time.data)
if start >= end:
msg = "End date/time must come after start date/time"
raise ValidationError(msg)
if form.start_date.data != form.end_date.data:
msg = "End date must be the same as start date"
raise ValidationError(msg)
| 36.395349 | 79 | 0.664537 |
79426548ebb9e0ea87ddacefe3c8bcc87ddadd63 | 12,426 | py | Python | src/config/fabric-ansible/ansible-playbooks/filter_plugins/ztp_filters.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/fabric-ansible/ansible-playbooks/filter_plugins/ztp_filters.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/fabric-ansible/ansible-playbooks/filter_plugins/ztp_filters.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 1 | 2020-12-18T18:22:53.000Z | 2020-12-18T18:22:53.000Z | #!/usr/bin/python
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
# This file contains code to gather IPAM config from the fabric management
# virtual network
#
from builtins import object
import logging
import time
from cfgm_common.exceptions import NoIdError
from netaddr import IPNetwork
from netifaces import AF_INET, ifaddresses, interfaces
from pyroute2 import IPRoute
from vnc_api.vnc_api import VncApi
from job_manager.job_utils import JobVncApi # noqa
class FilterModule(object):
ZTP_EXCHANGE = 'device_ztp_exchange'
ZTP_EXCHANGE_TYPE = 'direct'
CONFIG_FILE_ROUTING_KEY = 'device_ztp.config.file'
TFTP_FILE_ROUTING_KEY = 'device_ztp.tftp.file'
ZTP_REQUEST_ROUTING_KEY = 'device_ztp.request'
ZTP_RESPONSE_ROUTING_KEY = 'device_ztp.response.'
def filters(self):
return {
'ztp_dhcp_config': self.get_ztp_dhcp_config,
'ztp_tftp_config': self.get_ztp_tftp_config,
'create_tftp_file': self.create_tftp_file,
'delete_tftp_file': self.delete_tftp_file,
'create_dhcp_file': self.create_dhcp_file,
'delete_dhcp_file': self.delete_dhcp_file,
'restart_dhcp_server': self.restart_dhcp_server,
'read_dhcp_leases_using_count': self.read_dhcp_leases_using_count,
'read_dhcp_leases_using_info': self.read_dhcp_leases_using_info,
'read_only_dhcp_leases': self.read_only_dhcp_leases,
'remove_stale_pr_objects': self.remove_stale_pr_objects,
}
# Method to get interface name and configured ip address from
# subnet/ip address from subnet.
@classmethod
def get_host_ip_and_name(cls, subnet):
ip = IPRoute()
lookup_ip = ''
route_lst = ip.route('get',
dst=(subnet['subnet']['ip_prefix'] +
'/' +
str(subnet['subnet']['ip_prefix_len'])))
for tup in route_lst[0]['attrs'] or []:
if tup[0] == 'RTA_PREFSRC':
lookup_ip = str(tup[1])
for ifaceName in interfaces() or []:
addresses = [i['addr'] for i in ifaddresses(ifaceName)
.setdefault(AF_INET, [{'addr': 'No IP addr'}])]
if (addresses[0]) == lookup_ip.decode('utf-8'):
return lookup_ip, ifaceName
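    # Illustrative sketch (all values are examples only): for a subnet dict shaped
    # like {'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}} this returns a
    # tuple such as ('10.1.1.2', 'eth0') when a local interface carries the preferred
    # source address for that route; otherwise it falls through and returns None.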
@classmethod
def get_ztp_dhcp_config(cls, job_ctx, fabric_uuid):
dhcp_config = {}
try:
vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
auth_token=job_ctx.get('auth_token'))
fabric = vncapi.fabric_read(id=fabric_uuid)
fabric_dict = vncapi.obj_to_dict(fabric)
# From here we get the 'management' type virtual network
vn_uuid = None
virtual_network_refs = fabric_dict.get(
'virtual_network_refs') or []
for virtual_net_ref in virtual_network_refs:
if 'management' in virtual_net_ref['attr']['network_type']:
vn_uuid = virtual_net_ref['uuid']
break
if vn_uuid is None:
raise NoIdError("Cannot find mgmt virtual network on fabric")
virtual_net = vncapi.virtual_network_read(id=vn_uuid)
virtual_net_dict = vncapi.obj_to_dict(virtual_net)
# Get the IPAM attached to the virtual network
ipam_refs = virtual_net_dict.get('network_ipam_refs')
if ipam_refs:
ipam_ref = ipam_refs[0]
ipam = vncapi.network_ipam_read(id=ipam_ref['uuid'])
ipam_dict = vncapi.obj_to_dict(ipam)
ipam_subnets = ipam_dict.get('ipam_subnets')
if ipam_subnets:
dhcp_config['ipam_subnets'] = ipam_subnets.get('subnets')
                    # To support multiple subnets and interfaces for DHCP, each DHCP
                    # option is tagged with the interface name, e.g.
# dhcp-option=set:eth0, <ip-range start> <ip-range end>.
for subnet in dhcp_config['ipam_subnets']:
intf_ip, intf_name = cls.get_host_ip_and_name(subnet)
if intf_ip and intf_name:
subnet.update({'intf_ip': intf_ip})
subnet.update({'intf_name': intf_name})
cidr = subnet['subnet']['ip_prefix'] +\
"/" + str(subnet['subnet']['ip_prefix_len'])
ip = IPNetwork(cidr)
if len(ip) > 0:
subnet.update({'name': str(ip.ip).replace('.', '')})
# Get static ip configuration for physical routers
pr_refs = fabric.get_physical_router_back_refs() or []
pr_uuids = [ref['uuid'] for ref in pr_refs]
static_ips = {}
for pr_uuid in pr_uuids:
pr = vncapi.physical_router_read(id=pr_uuid)
pr_dict = vncapi.obj_to_dict(pr)
mac = pr_dict.get('physical_router_management_mac')
ip = pr_dict.get('physical_router_management_ip')
if mac and ip:
static_ips[ip] = mac
if static_ips:
dhcp_config['static_ips'] = static_ips
# Get user-specified static ip configuration
static_host_ips = {}
dynamic_hosts = []
job_input = job_ctx.get('job_input', {})
device_to_ztp = job_input.get('device_to_ztp', [])
for dev in device_to_ztp:
mgmt_ip = dev.get('mgmt_ip')
sernum = dev.get('serial_number')
if sernum:
if mgmt_ip:
static_host_ips[mgmt_ip] = sernum
else:
dynamic_hosts.append(sernum)
if static_host_ips:
dhcp_config['static_host_ips'] = static_host_ips
if dynamic_hosts:
dhcp_config['dynamic_hosts'] = dynamic_hosts
except Exception as ex:
logging.error(
"Error getting ZTP DHCP configuration: {}".format(ex))
return dhcp_config
# end get_ztp_dhcp_config
@classmethod
def get_ztp_tftp_config(cls, job_ctx, dev_password=None):
tftp_config = {}
if job_ctx:
device_creds = job_ctx['job_input'].get('device_auth')
if device_creds:
password = device_creds['root_password']
tftp_config['password'] = password
if dev_password:
tftp_config['password'] = dev_password
return tftp_config
# end get_ztp_tftp_config
@classmethod
def create_tftp_file(cls, file_contents, file_name,
fabric_name, job_ctx):
return cls._publish_file(file_name, file_contents, 'create',
cls.TFTP_FILE_ROUTING_KEY, fabric_name,
job_ctx)
# end create_tftp_file
@classmethod
def delete_tftp_file(cls, file_name, fabric_name, job_ctx):
return cls._publish_file(file_name, '', 'delete',
cls.TFTP_FILE_ROUTING_KEY,
fabric_name, job_ctx)
# end delete_tftp_file
@classmethod
def create_dhcp_file(cls, file_contents, file_name,
fabric_name, job_ctx):
return cls._publish_file(file_name, file_contents, 'create',
cls.CONFIG_FILE_ROUTING_KEY, fabric_name,
job_ctx)
# end create_dhcp_file
@classmethod
def delete_dhcp_file(cls, file_name, fabric_name, job_ctx):
return cls._publish_file(file_name, '', 'delete',
cls.CONFIG_FILE_ROUTING_KEY,
fabric_name, job_ctx)
# end delete_dhcp_file
@classmethod
def read_dhcp_leases_using_count(cls, device_count, ipam_subnets,
file_name, fabric_name, job_ctx):
return cls.read_dhcp_leases(ipam_subnets, file_name, fabric_name,
job_ctx, 'device_count', int(device_count))
# end read_dhcp_leases_using_count
@classmethod
def read_dhcp_leases_using_info(cls, device_to_ztp, ipam_subnets,
file_name, fabric_name, job_ctx):
return cls.read_dhcp_leases(ipam_subnets, file_name, fabric_name,
job_ctx, 'device_to_ztp', device_to_ztp)
# end read_dhcp_leases_using_info
@classmethod
def read_only_dhcp_leases(cls, device_to_ztp, ipam_subnets, file_name,
fabric_name, job_ctx):
return cls.read_dhcp_leases(ipam_subnets, file_name, fabric_name,
job_ctx, 'device_to_ztp', device_to_ztp,
action='read')
# end read_only_dhcp_leases
@classmethod
def read_dhcp_leases(cls, ipam_subnets, file_name, fabric_name, job_ctx,
payload_key, payload_value, action='create'):
vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
auth_token=job_ctx.get('auth_token'),
timeout=600)
headers = {
'fabric_name': fabric_name,
'file_name': file_name,
'action': action
}
payload = {
'ipam_subnets': ipam_subnets
}
payload[payload_key] = payload_value
return vnc_api.amqp_request(
exchange=cls.ZTP_EXCHANGE,
exchange_type=cls.ZTP_EXCHANGE_TYPE,
routing_key=cls.ZTP_REQUEST_ROUTING_KEY,
response_key=cls.ZTP_RESPONSE_ROUTING_KEY + fabric_name,
headers=headers, payload=payload)
# end read_dhcp_leases
@classmethod
def restart_dhcp_server(cls, file_name, fabric_name, job_ctx):
vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
auth_token=job_ctx.get('auth_token'))
headers = {
'fabric_name': fabric_name,
'file_name': file_name,
'action': 'delete'
}
vnc_api.amqp_publish(exchange=cls.ZTP_EXCHANGE,
exchange_type=cls.ZTP_EXCHANGE_TYPE,
routing_key=cls.ZTP_REQUEST_ROUTING_KEY,
headers=headers,
payload={})
return {'status': 'success'}
# end restart_dhcp_server
@classmethod
def remove_stale_pr_objects(cls, job_ctx):
"""
Clean up stale temporary PR objects when
ZTP workflow fails.
"""
filters = {}
try:
vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
auth_token=job_ctx.get('auth_token'))
except Exception as ex:
logging.error("Error connecting to API server: {}".format(ex))
return True
# A case was noticed where the object deletion is attempted
# before it is even created. To avoid this, wait for a
# couple of seconds before trying to delete the PR
time.sleep(2)
filters['physical_router_managed_state'] = "dhcp"
pr_list = vnc_api.physical_routers_list(
filters=filters).get('physical-routers')
for pr in pr_list:
vnc_api.physical_router_delete(id=pr['uuid'])
logging.info("Router {} in dhcp state deleted".format(
pr['fq_name'][-1]))
return True
# end remove_stale_pr_objects
@classmethod
def _publish_file(cls, name, contents, action, routing_key,
fabric_name, job_ctx):
vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
auth_token=job_ctx.get('auth_token'))
headers = {
'fabric_name': fabric_name,
'file_name': name,
'action': action
}
vnc_api.amqp_publish(exchange=cls.ZTP_EXCHANGE,
exchange_type=cls.ZTP_EXCHANGE_TYPE,
routing_key=routing_key, headers=headers,
payload=contents)
return {'status': 'success'}
# end _publish_file
| 40.875 | 79 | 0.580235 |
794265ab97d7e3b84a214322bf4f6d471b577fca | 1,972 | py | Python | docs/conf.py | robarnold/bastille | 38bb7faabf7f69051a9c088c17189c60769b07d7 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | robarnold/bastille | 38bb7faabf7f69051a9c088c17189c60769b07d7 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | robarnold/bastille | 38bb7faabf7f69051a9c088c17189c60769b07d7 | [
"BSD-3-Clause"
] | null | null | null | import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# -- Project information -----------------------------------------------------
project = 'Bastille'
copyright = '2018-2021, Christer Edwards'
author = 'Christer Edwards'
# The short X.Y version
version = '0.9.20211225'
# The full version, including alpha/beta/rc tags
release = '0.9.20211225-beta'
# -- General configuration ---------------------------------------------------
extensions = [
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
# -- Options for HTML output -------------------------------------------------
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------------
htmlhelp_basename = 'Bastilledoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
}
latex_documents = [
(master_doc, 'Bastille.tex', 'Bastille Documentation',
'Christer Edwards', 'manual'),
]
# -- Options for manual page output ------------------------------------------
man_pages = [
(master_doc, 'bastille', 'Bastille Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
texinfo_documents = [
(master_doc, 'Bastille', 'Bastille Documentation',
author, 'Bastille', 'Bastille is an open-source system for automating deployment and management of containerized applications on FreeBSD.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 24.65 | 144 | 0.564909 |
79426813e38fee68b9399a293ff47b1c3e4c1f56 | 6,712 | py | Python | sqlserver_ado/ado_consts.py | BangC/django-mssql | 998c7a0c94f2906bc79f6cf8b74a5a53420f3714 | [
"MIT"
] | null | null | null | sqlserver_ado/ado_consts.py | BangC/django-mssql | 998c7a0c94f2906bc79f6cf8b74a5a53420f3714 | [
"MIT"
] | null | null | null | sqlserver_ado/ado_consts.py | BangC/django-mssql | 998c7a0c94f2906bc79f6cf8b74a5a53420f3714 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
# ADO enumerated constants documented on MSDN:
# http://msdn.microsoft.com/en-us/library/ms678353(VS.85).aspx
# IsolationLevelEnum
adXactUnspecified = -1
adXactBrowse = 0x100
adXactChaos = 0x10
adXactCursorStability = 0x1000
adXactIsolated = 0x100000
adXactReadCommitted = 0x1000
adXactReadUncommitted = 0x100
adXactRepeatableRead = 0x10000
adXactSerializable = 0x100000
# CursorLocationEnum
adUseClient = 3
adUseServer = 2
# CursorTypeEnum
adOpenDynamic = 2
adOpenForwardOnly = 0
adOpenKeyset = 1
adOpenStatic = 3
adOpenUnspecified = -1
# CommandTypeEnum
adCmdText = 1
adCmdStoredProc = 4
# ParameterDirectionEnum
adParamInput = 1
adParamInputOutput = 3
adParamOutput = 2
adParamReturnValue = 4
adParamUnknown = 0
# ObjectStateEnum
adStateClosed = 0
adStateOpen = 1
adStateConnecting = 2
adStateExecuting = 4
adStateFetching = 8
# FieldAttributeEnum
adFldMayBeNull = 0x40
# ConnectModeEnum
adModeUnknown = 0
adModeRead = 1
adModeWrite = 2
adModeReadWrite = 3
adModeShareDenyRead = 4
adModeShareDenyWrite = 8
adModeShareExclusive = 12
adModeShareDenyNone = 16
adModeRecursive = 0x400000
# XactAttributeEnum
adXactCommitRetaining = 131072
adXactAbortRetaining = 262144
ado_error_TIMEOUT = -2147217871
# DataTypeEnum - ADO Data types documented at:
# http://msdn2.microsoft.com/en-us/library/ms675318.aspx
adArray = 0x2000
adEmpty = 0x0
adBSTR = 0x8
adBigInt = 0x14
adBinary = 0x80
adBoolean = 0xb
adChapter = 0x88
adChar = 0x81
adCurrency = 0x6
adDBDate = 0x85
adDBTime = 0x86
adDBTimeStamp = 0x87
adDate = 0x7
adDecimal = 0xe
adDouble = 0x5
adError = 0xa
adFileTime = 0x40
adGUID = 0x48
adIDispatch = 0x9
adIUnknown = 0xd
adInteger = 0x3
adLongVarBinary = 0xcd
adLongVarChar = 0xc9
adLongVarWChar = 0xcb
adNumeric = 0x83
adPropVariant = 0x8a
adSingle = 0x4
adSmallInt = 0x2
adTinyInt = 0x10
adUnsignedBigInt = 0x15
adUnsignedInt = 0x13
adUnsignedSmallInt = 0x12
adUnsignedTinyInt = 0x11
adUserDefined = 0x84
adVarBinary = 0xCC
adVarChar = 0xC8
adVarNumeric = 0x8B
adVarWChar = 0xCA
adVariant = 0xC
adWChar = 0x82
adTypeNames = {
adBSTR: 'adBSTR',
adBigInt: 'adBigInt',
adBinary: 'adBinary',
adBoolean: 'adBoolean',
adChapter: 'adChapter',
adChar: 'adChar',
adCurrency: 'adCurrency',
adDBDate: 'adDBDate',
adDBTime: 'adDBTime',
adDBTimeStamp: 'adDBTimeStamp',
adDate: 'adDate',
adDecimal: 'adDecimal',
adDouble: 'adDouble',
adEmpty: 'adEmpty',
adError: 'adError',
adFileTime: 'adFileTime',
adGUID: 'adGUID',
adIDispatch: 'adIDispatch',
adIUnknown: 'adIUnknown',
adInteger: 'adInteger',
adLongVarBinary: 'adLongVarBinary',
adLongVarChar: 'adLongVarChar',
adLongVarWChar: 'adLongVarWChar',
adNumeric: 'adNumeric',
adPropVariant: 'adPropVariant',
adSingle: 'adSingle',
adSmallInt: 'adSmallInt',
adTinyInt: 'adTinyInt',
adUnsignedBigInt: 'adUnsignedBigInt',
adUnsignedInt: 'adUnsignedInt',
adUnsignedSmallInt: 'adUnsignedSmallInt',
adUnsignedTinyInt: 'adUnsignedTinyInt',
adUserDefined: 'adUserDefined',
adVarBinary: 'adVarBinary',
adVarChar: 'adVarChar',
adVarNumeric: 'adVarNumeric',
adVarWChar: 'adVarWChar',
adVariant: 'adVariant',
adWChar: 'adWChar',
}
def ado_type_name(ado_type):
return adTypeNames.get(ado_type, 'unknown type ('+str(ado_type)+')')
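# Illustrative calls (hypothetical): ado_type_name(adInteger) returns 'adInteger',
# while an unmapped code such as ado_type_name(0x999) returns 'unknown type (2457)'
# because 0x999 == 2457.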
# Error codes to names
adoErrors= {
0xe7b :'adErrBoundToCommand',
0xe94 :'adErrCannotComplete',
0xea4 :'adErrCantChangeConnection',
0xc94 :'adErrCantChangeProvider',
0xe8c :'adErrCantConvertvalue',
0xe8d :'adErrCantCreate',
0xea3 :'adErrCatalogNotSet',
0xe8e :'adErrColumnNotOnThisRow',
0xd5d :'adErrDataConversion',
0xe89 :'adErrDataOverflow',
0xe9a :'adErrDelResOutOfScope',
0xea6 :'adErrDenyNotSupported',
0xea7 :'adErrDenyTypeNotSupported',
0xcb3 :'adErrFeatureNotAvailable',
0xea5 :'adErrFieldsUpdateFailed',
0xc93 :'adErrIllegalOperation',
0xcae :'adErrInTransaction',
0xe87 :'adErrIntegrityViolation',
0xbb9 :'adErrInvalidArgument',
0xe7d :'adErrInvalidConnection',
0xe7c :'adErrInvalidParamInfo',
0xe82 :'adErrInvalidTransaction',
0xe91 :'adErrInvalidURL',
0xcc1 :'adErrItemNotFound',
0xbcd :'adErrNoCurrentRecord',
0xe83 :'adErrNotExecuting',
0xe7e :'adErrNotReentrant',
0xe78 :'adErrObjectClosed',
0xd27 :'adErrObjectInCollection',
0xd5c :'adErrObjectNotSet',
0xe79 :'adErrObjectOpen',
0xbba :'adErrOpeningFile',
0xe80 :'adErrOperationCancelled',
0xe96 :'adErrOutOfSpace',
0xe88 :'adErrPermissionDenied',
0xe9e :'adErrPropConflicting',
0xe9b :'adErrPropInvalidColumn',
0xe9c :'adErrPropInvalidOption',
0xe9d :'adErrPropInvalidValue',
0xe9f :'adErrPropNotAllSettable',
0xea0 :'adErrPropNotSet',
0xea1 :'adErrPropNotSettable',
0xea2 :'adErrPropNotSupported',
0xbb8 :'adErrProviderFailed',
0xe7a :'adErrProviderNotFound',
0xbbb :'adErrReadFile',
0xe93 :'adErrResourceExists',
0xe92 :'adErrResourceLocked',
0xe97 :'adErrResourceOutOfScope',
0xe8a :'adErrSchemaViolation',
0xe8b :'adErrSignMismatch',
0xe81 :'adErrStillConnecting',
0xe7f :'adErrStillExecuting',
0xe90 :'adErrTreePermissionDenied',
0xe8f :'adErrURLDoesNotExist',
0xe99 :'adErrURLNamedRowDoesNotExist',
0xe98 :'adErrUnavailable',
0xe84 :'adErrUnsafeOperation',
0xe95 :'adErrVolumeNotFound',
0xbbc :'adErrWriteFile'
}
| 30.930876 | 72 | 0.601162 |
794268b1de1fd7e835c35f815b48bbc86df1aedc | 16,337 | py | Python | pymavlink-2.3.8/generator/mavgen_wlua.py | NicEscobar/InertialNavigation | 85dffed6cf5c4063a097c3c9305f4ec92ce53623 | [
"MIT"
] | 10 | 2021-03-15T03:58:06.000Z | 2021-12-30T15:33:38.000Z | pymavlink-2.3.8/generator/mavgen_wlua.py | NicEscobar/InertialNavigation | 85dffed6cf5c4063a097c3c9305f4ec92ce53623 | [
"MIT"
] | 4 | 2021-05-03T16:58:53.000Z | 2021-12-21T21:01:02.000Z | pymavlink-2.3.8/generator/mavgen_wlua.py | NicEscobar/InertialNavigation | 85dffed6cf5c4063a097c3c9305f4ec92ce53623 | [
"MIT"
] | 9 | 2021-04-28T15:26:34.000Z | 2021-12-21T20:41:30.000Z | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a Wireshark LUA dissector
Copyright Holger Steinhaus 2012
Released under GNU GPL version 3 or later
Instructions for use:
1. python -m pymavlink.tools.mavgen --lang=WLua mymavlink.xml -o ~/.wireshark/plugins/mymavlink.lua
2. convert binary stream int .pcap file format (see ../examples/mav2pcap.py)
3. open the pcap file in Wireshark
'''
from __future__ import print_function
from builtins import range
import os
import re
from . import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
def lua_type(mavlink_type):
# qnd typename conversion
if (mavlink_type=='char'):
lua_t = 'uint8'
else:
lua_t = mavlink_type.replace('_t', '')
return lua_t
def type_size(mavlink_type):
# infer size of mavlink types
re_int = re.compile('^(u?)int(8|16|32|64)_t$')
int_parts = re_int.findall(mavlink_type)
if len(int_parts):
return (int(int_parts[0][1]) // 8)
elif mavlink_type == 'float':
return 4
elif mavlink_type == 'double':
return 8
elif mavlink_type == 'char':
return 1
else:
raise Exception('unsupported MAVLink type - please fix me')
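# Illustrative calls (hypothetical): lua_type('uint8_t') -> 'uint8',
# lua_type('char') -> 'uint8', type_size('uint32_t') -> 4, type_size('char') -> 1.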
def mavfmt(field):
'''work out the struct format for a type'''
map = {
'float' : 'f',
'double' : 'd',
'char' : 'c',
'int8_t' : 'b',
'uint8_t' : 'B',
'uint8_t_mavlink_version' : 'B',
'int16_t' : 'h',
'uint16_t' : 'H',
'int32_t' : 'i',
'uint32_t' : 'I',
'int64_t' : 'q',
'uint64_t' : 'Q',
}
if field.array_length:
if field.type in ['char', 'int8_t', 'uint8_t']:
return str(field.array_length)+'s'
return str(field.array_length)+map[field.type]
return map[field.type]
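# Illustrative mapping (hypothetical fields): a plain uint16_t field maps to 'H',
# a float field with array_length 3 maps to '3f', and a char field with
# array_length 10 maps to '10s', mirroring Python struct format characters.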
def generate_preamble(outf):
print("Generating preamble")
t.write(outf,
"""
-- Wireshark dissector for the MAVLink protocol (please see http://qgroundcontrol.org/mavlink/start for details)
unknownFrameBeginOffset = 0
local bit = require "bit32"
mavlink_proto = Proto("mavlink_proto", "MAVLink protocol")
f = mavlink_proto.fields
-- from http://lua-users.org/wiki/TimeZone
local function get_timezone()
local now = os.time()
return os.difftime(now, os.time(os.date("!*t", now)))
end
local signature_time_ref = get_timezone() + os.time{year=2015, month=1, day=1, hour=0}
payload_fns = {}
""" )
def generate_body_fields(outf):
t.write(outf,
"""
f.magic = ProtoField.uint8("mavlink_proto.magic", "Magic value / version", base.HEX)
f.length = ProtoField.uint8("mavlink_proto.length", "Payload length")
f.incompatibility_flag = ProtoField.uint8("mavlink_proto.incompatibility_flag", "Incompatibility flag")
f.compatibility_flag = ProtoField.uint8("mavlink_proto.compatibility_flag", "Compatibility flag")
f.sequence = ProtoField.uint8("mavlink_proto.sequence", "Packet sequence")
f.sysid = ProtoField.uint8("mavlink_proto.sysid", "System id", base.HEX)
f.compid = ProtoField.uint8("mavlink_proto.compid", "Component id", base.HEX)
f.msgid = ProtoField.uint24("mavlink_proto.msgid", "Message id", base.HEX)
f.payload = ProtoField.uint8("mavlink_proto.payload", "Payload", base.DEC, messageName)
f.crc = ProtoField.uint16("mavlink_proto.crc", "Message CRC", base.HEX)
f.signature_link = ProtoField.uint8("mavlink_proto.signature_link", "Link id", base.DEC)
f.signature_time = ProtoField.absolute_time("mavlink_proto.signature_time", "Time")
f.signature_signature = ProtoField.bytes("mavlink_proto.signature_signature", "Signature")
f.rawheader = ProtoField.bytes("mavlink_proto.rawheader", "Unparsable header fragment")
f.rawpayload = ProtoField.bytes("mavlink_proto.rawpayload", "Unparsable payload")
""")
def generate_msg_table(outf, msgs):
t.write(outf, """
messageName = {
""")
for msg in msgs:
assert isinstance(msg, mavparse.MAVType)
t.write(outf, """
[${msgid}] = '${msgname}',
""", {'msgid':msg.id, 'msgname':msg.name})
t.write(outf, """
}
""")
def generate_msg_fields(outf, msg):
assert isinstance(msg, mavparse.MAVType)
for f in msg.fields:
assert isinstance(f, mavparse.MAVField)
mtype = f.type
ltype = lua_type(mtype)
count = f.array_length if f.array_length>0 else 1
# string is no array, but string of chars
if mtype == 'char' and count > 1:
count = 1
ltype = 'string'
for i in range(0,count):
if count>1:
array_text = '[' + str(i) + ']'
index_text = '_' + str(i)
else:
array_text = ''
index_text = ''
t.write(outf,
"""
f.${fmsg}_${fname}${findex} = ProtoField.${ftype}("mavlink_proto.${fmsg}_${fname}${findex}", "${fname}${farray} (${ftype})")
""", {'fmsg':msg.name, 'ftype':ltype, 'fname':f.name, 'findex':index_text, 'farray':array_text})
t.write(outf, '\n\n')
def generate_field_dissector(outf, msg, field):
assert isinstance(field, mavparse.MAVField)
mtype = field.type
size = type_size(mtype)
ltype = lua_type(mtype)
count = field.array_length if field.array_length>0 else 1
# string is no array but string of chars
if mtype == 'char':
size = count
count = 1
# handle arrays, but not strings
for i in range(0,count):
if count>1:
index_text = '_' + str(i)
else:
index_text = ''
t.write(outf,
"""
if (truncated) then
tree:add_le(f.${fmsg}_${fname}${findex}, 0)
elseif (offset + ${fbytes} <= limit) then
tree:add_le(f.${fmsg}_${fname}${findex}, buffer(offset, ${fbytes}))
offset = offset + ${fbytes}
elseif (offset < limit) then
tree:add_le(f.${fmsg}_${fname}${findex}, buffer(offset, limit - offset))
offset = limit
truncated = true
else
tree:add_le(f.${fmsg}_${fname}${findex}, 0)
truncated = true
end
""", {'fname':field.name, 'ftype':mtype, 'fmsg': msg.name, 'fbytes':size, 'findex':index_text})
def generate_payload_dissector(outf, msg):
assert isinstance(msg, mavparse.MAVType)
t.write(outf,
"""
-- dissect payload of message type ${msgname}
function payload_fns.payload_${msgid}(buffer, tree, msgid, offset, limit)
local truncated = false
""", {'msgid':msg.id, 'msgname':msg.name})
for f in msg.ordered_fields:
generate_field_dissector(outf, msg, f)
t.write(outf,
"""
return offset
end
""")
def generate_packet_dis(outf):
t.write(outf,
"""
-- dissector function
function mavlink_proto.dissector(buffer,pinfo,tree)
local offset = 0
local msgCount = 0
-- loop through the buffer to extract all the messages in the buffer
while (offset < buffer:len())
do
msgCount = msgCount + 1
local subtree = tree:add (mavlink_proto, buffer(), "MAVLink Protocol ("..buffer:len()..")")
-- decode protocol version first
local version = buffer(offset,1):uint()
local protocolString = ""
while (true)
do
if (version == 0xfe) then
protocolString = "MAVLink 1.0"
break
elseif (version == 0xfd) then
protocolString = "MAVLink 2.0"
break
elseif (version == 0x55) then
protocolString = "MAVLink 0.9"
break
else
protocolString = "unknown"
-- some unknown data found, record the begin offset
if (unknownFrameBeginOffset == 0) then
unknownFrameBeginOffset = offset
end
offset = offset + 1
if (offset < buffer:len()) then
version = buffer(offset,1):uint()
else
-- no magic value found in the whole buffer. print the raw data and exit
if (unknownFrameBeginOffset ~= 0) then
if (msgCount == 1) then
pinfo.cols.info:set("Unknown message")
else
pinfo.cols.info:append(" Unknown message")
end
size = offset - unknownFrameBeginOffset
subtree:add(f.rawpayload, buffer(unknownFrameBeginOffset,size))
unknownFrameBeginOffset = 0
end
return
end
end
end
if (unknownFrameBeginOffset ~= 0) then
pinfo.cols.info:append("Unknown message")
size = offset - unknownFrameBeginOffset
subtree:add(f.rawpayload, buffer(unknownFrameBeginOffset,size))
unknownFrameBeginOffset = 0
-- jump to next loop
break
end
-- some Wireshark decoration
pinfo.cols.protocol = protocolString
-- HEADER ----------------------------------------
local msgid
local length
local incompatibility_flag
if (version == 0xfe) then
if (buffer:len() - 2 - offset > 6) then
-- normal header
local header = subtree:add("Header")
header:add(f.magic, buffer(offset,1), version)
offset = offset + 1
length = buffer(offset,1)
header:add(f.length, length)
offset = offset + 1
local sequence = buffer(offset,1)
header:add(f.sequence, sequence)
offset = offset + 1
local sysid = buffer(offset,1)
header:add(f.sysid, sysid)
offset = offset + 1
local compid = buffer(offset,1)
header:add(f.compid, compid)
offset = offset + 1
pinfo.cols.src = "System: "..tostring(sysid:uint())..', Component: '..tostring(compid:uint())
msgid = buffer(offset,1):uint()
header:add(f.msgid, buffer(offset,1), msgid)
offset = offset + 1
else
-- handle truncated header
local hsize = buffer:len() - 2 - offset
subtree:add(f.rawheader, buffer(offset, hsize))
offset = offset + hsize
end
elseif (version == 0xfd) then
if (buffer:len() - 2 - offset > 10) then
-- normal header
local header = subtree:add("Header")
header:add(f.magic, buffer(offset,1), version)
offset = offset + 1
length = buffer(offset,1)
header:add(f.length, length)
offset = offset + 1
incompatibility_flag = buffer(offset,1):uint()
header:add(f.incompatibility_flag, buffer(offset,1), incompatibility_flag)
offset = offset + 1
local compatibility_flag = buffer(offset,1)
header:add(f.compatibility_flag, compatibility_flag)
offset = offset + 1
local sequence = buffer(offset,1)
header:add(f.sequence, sequence)
offset = offset + 1
local sysid = buffer(offset,1)
header:add(f.sysid, sysid)
offset = offset + 1
local compid = buffer(offset,1)
header:add(f.compid, compid)
offset = offset + 1
pinfo.cols.src = "System: "..tostring(sysid:uint())..', Component: '..tostring(compid:uint())
msgid = buffer(offset,3):le_uint()
header:add(f.msgid, buffer(offset,3), msgid)
offset = offset + 3
else
-- handle truncated header
local hsize = buffer:len() - 2 - offset
subtree:add(f.rawheader, buffer(offset, hsize))
offset = offset + hsize
end
end
-- BODY ----------------------------------------
-- dynamically call the type-specific payload dissector
local msgnr = msgid
local dissect_payload_fn = "payload_"..tostring(msgnr)
local fn = payload_fns[dissect_payload_fn]
local limit = buffer:len() - 2
if (length) then
length = length:uint()
else
length = 0
end
if (offset + length < limit) then
limit = offset + length
end
if (fn == nil) then
pinfo.cols.info:append ("Unknown message type ")
subtree:add_expert_info(PI_MALFORMED, PI_ERROR, "Unknown message type")
size = buffer:len() - 2 - offset
subtree:add(f.rawpayload, buffer(offset,size))
offset = offset + size
else
local payload = subtree:add(f.payload, msgid)
pinfo.cols.dst:set(messageName[msgid])
if (msgCount == 1) then
-- first message should over write the TCP/UDP info
pinfo.cols.info = messageName[msgid]
else
pinfo.cols.info:append(" "..messageName[msgid])
end
fn(buffer, payload, msgid, offset, limit)
offset = limit
end
-- CRC ----------------------------------------
local crc = buffer(offset,2)
subtree:add_le(f.crc, crc)
offset = offset + 2
-- SIGNATURE ----------------------------------
if (version == 0xfd and incompatibility_flag == 0x01) then
local signature = subtree:add("Signature")
local link = buffer(offset,1)
signature:add(f.signature_link, link)
offset = offset + 1
local signature_time = buffer(offset,6):le_uint64()
local time_secs = signature_time / 100000
local time_nsecs = (signature_time - (time_secs * 100000)) * 10000
signature:add(f.signature_time, buffer(offset,6), NSTime.new(signature_time_ref + time_secs:tonumber(), time_nsecs:tonumber()))
offset = offset + 6
local signature_signature = buffer(offset,6)
signature:add(f.signature_signature, signature_signature)
offset = offset + 6
end
end
end
""")
def generate_epilog(outf):
print("Generating epilog")
t.write(outf,
"""
-- bind protocol dissector to USER0 linktype
wtap_encap = DissectorTable.get("wtap_encap")
wtap_encap:add(wtap.USER0, mavlink_proto)
-- bind protocol dissector to port 14550 and 14580
local udp_dissector_table = DissectorTable.get("udp.port")
udp_dissector_table:add(14550, mavlink_proto)
udp_dissector_table:add(14580, mavlink_proto)
""")
def generate(basename, xml):
    '''generate the complete Wireshark Lua dissector'''
if basename.endswith('.lua'):
filename = basename
else:
filename = basename + '.lua'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename))
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<'
else:
m.fmtstr = '>'
for f in m.ordered_fields:
m.fmtstr += mavfmt(f)
m.order_map = [ 0 ] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
print("Generating %s" % filename)
outf = open(filename, "w")
generate_preamble(outf)
generate_msg_table(outf, msgs)
generate_body_fields(outf)
for m in msgs:
generate_msg_fields(outf, m)
for m in msgs:
generate_payload_dissector(outf, m)
generate_packet_dis(outf)
# generate_enums(outf, enums)
# generate_message_ids(outf, msgs)
# generate_classes(outf, msgs)
# generate_mavlink_class(outf, msgs, xml[0])
# generate_methods(outf, msgs)
generate_epilog(outf)
outf.close()
print("Generated %s OK" % filename)
| 32.543825 | 139 | 0.565587 |
794269026da92ff690a3e57a3e3ff56b30943a3c | 4,070 | py | Python | bot.py | peterkagey/OEISbot | b2b5abe82f74aa331d7cc9fd6a7531617136a5c5 | [
"MIT"
] | null | null | null | bot.py | peterkagey/OEISbot | b2b5abe82f74aa331d7cc9fd6a7531617136a5c5 | [
"MIT"
] | null | null | null | bot.py | peterkagey/OEISbot | b2b5abe82f74aa331d7cc9fd6a7531617136a5c5 | [
"MIT"
] | null | null | null | from __future__ import print_function
import praw
import re
import urllib.request
import json
from praw.models import MoreComments
import sys
test = False
if len(sys.argv) > 1 and sys.argv[1] == "test":
test=True
print("TEST MODE")
class FoundOne(BaseException):
pass
def read_url(url):
with urllib.request.urlopen(url) as r:
data = r.read()
return data.decode('utf-8')
def save_list(seen, _id):
print(seen)
with open("/home/pi/OEISbot/seen/"+_id, "w") as f:
return json.dump(seen, f)
def open_list(_id):
try:
with open("/home/pi/OEISbot/seen/" + _id) as f:
return json.load(f)
except:
return []
def escape(text):
text = "\\^".join(text.split("^"))
text = "\\*".join(text.split("*"))
return text
def deduplicate(target_list):
unique_values = []
[unique_values.append(x) for x in target_list if x not in unique_values]
return unique_values
def a_numbers_in_text(body):
valid_prefix = "(?:[\s\/'\"\-\+\*]|^)"
optional_opening_parens = "[\[\(\{]*"
a_number = "A(\d{6})"
valid_suffix = "(?:[\s\(\)\[\]]|$)"
a_number_regex_pattern = valid_prefix + optional_opening_parens + a_number + valid_suffix
all_matches = re.findall(a_number_regex_pattern, body)
return deduplicate(all_matches)
def look_for_A(id_, text, comment):
seen = open_list(id_)
re_s = a_numbers_in_text(text)
if test:
print(re_s)
post_me = []
for seq_n in re_s:
if seq_n not in seen:
post_me.append(markup(seq_n))
seen.append(seq_n)
if len(post_me) > 0:
post_me.append(me())
comment(escape(joiner().join(post_me)))
save_list(seen, id_)
raise FoundOne
def load_search(terms):
src = read_url("http://oeis.org/search?fmt=data&q="+terms)
ls = re.findall("href=(?:'|\")/A([0-9]{6})(?:'|\")", src)
try:
tot = int(re.findall("of ([0-9]+) results found", src)[0])
except:
tot = 0
return ls, tot
def markup(seq_n):
pattern = re.compile("%N (.*?)<", re.DOTALL|re.M)
desc = read_url("http://oeis.org/A" + seq_n + "/internal")
desc = pattern.findall(desc)[0].strip("\n")
pattern = re.compile("%S (.*?)<", re.DOTALL|re.M)
seq = read_url("http://oeis.org/A" + seq_n + "/internal")
seq = pattern.findall(seq)[0].strip("\n")
new_com = "[A" + seq_n + "](http://oeis.org/A" + seq_n + "/): "
new_com += desc + "\n\n"
new_com += seq + "..."
return new_com
def me():
return "I am OEISbot. I was programmed by /u/mscroggs. " \
"[How I work](http://mscroggs.co.uk/blog/20). " \
"You can test me and suggest new features at /r/TestingOEISbot/."
def joiner():
return "\n\n- - - -\n\n"
r = praw.Reddit("DEFAULT", user_agent="OEIS sequence poster")
#access_i = r.refresh_access_information(refresh_token=r.refresh_token)
#r.set_access_credentials(**access_i)
auth = r.user
subs = ["TestingOEISbot","math","mathpuzzles","casualmath","theydidthemath",
"learnmath","mathbooks","cheatatmathhomework","matheducation",
"puremathematics","mathpics","mathriddles","askmath",
"recreationalmath","OEIS","mathclubs","maths"]
if test:
subs = ["TestingOEISbot"]
try:
for sub in subs:
print(sub)
subreddit = r.subreddit(sub)
for submission in subreddit.hot(limit = 10):
if test:
print(submission.title)
look_for_A(submission.id,
submission.title + "|" + submission.selftext,
submission.reply)
for comment in submission.comments:
if ( not isinstance(comment, MoreComments)
and comment.author is not None
and comment.author.name != "OEISbot" ):
look_for_A(submission.id,
comment.body,
comment.reply)
except FoundOne:
pass
| 29.708029 | 93 | 0.58059 |
79426a5401f8ed89449902b829e52830c67edc03 | 1,206 | py | Python | setup.py | kozakHolota/pytest-html-reporter | 29aea2297a1613d3d2be1cfe54500473add8bd01 | [
"MIT"
] | null | null | null | setup.py | kozakHolota/pytest-html-reporter | 29aea2297a1613d3d2be1cfe54500473add8bd01 | [
"MIT"
] | null | null | null | setup.py | kozakHolota/pytest-html-reporter | 29aea2297a1613d3d2be1cfe54500473add8bd01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup, find_packages
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding="utf-8").read()
setup(
name="pytest-html-reporter",
version="0.2.9",
author="Prashanth Sams",
author_email="[email protected]",
maintainer="Prashanth Sams",
maintainer_email="[email protected]",
license="MIT",
url="https://github.com/prashanth-sams/pytest-html-reporter",
description="Generates a static html report based on pytest framework",
long_description=read("README.rst"),
keywords=["pytest", "py.test", "html", "reporter", "report"],
packages=find_packages(),
python_requires=">=3.5",
install_requires=["pytest", "Pillow"],
classifiers=[
"Framework :: Pytest",
"Topic :: Software Development :: Testing",
"Programming Language :: Python",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
],
entry_points={
"pytest11": [
"reporter = pytest_html_reporter.plugin",
],
},
)
| 28.714286 | 75 | 0.640133 |
79426aba52983da561bfc17d3be6a7224fdd7fd8 | 14,231 | py | Python | artifacts/old_dataset_versions/minimal_commits_v02/pennylane/pennylane#481_B/before/_qubit_device.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 3 | 2021-11-08T11:46:42.000Z | 2021-12-27T10:13:38.000Z | artifacts/old_dataset_versions/minimal_commits/pennylane/pennylane#481/before/_qubit_device.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 2 | 2021-11-09T14:57:09.000Z | 2022-01-12T12:35:58.000Z | artifacts/old_dataset_versions/original_commits_v02/pennylane/pennylane#481_B/before/_qubit_device.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`QubitDevice` abstract base class.
"""
# For now, arguments may be different from the signatures provided in Device
# e.g. instead of expval(self, observable, wires, par) have expval(self, observable)
# pylint: disable=arguments-differ, abstract-method, no-value-for-parameter,too-many-instance-attributes
import abc
import numpy as np
from pennylane.operation import Sample, Variance, Expectation, Probability
from pennylane.qnodes import QuantumFunctionError
from pennylane import Device
class QubitDevice(Device):
"""Abstract base class for PennyLane qubit devices.
The following abstract methods **must** be defined:
* :meth:`~.probability`: returns the probability or marginal probability from the
device after circuit execution. :meth:`~.marginal_prob` may be used here.
* :meth:`~.apply`: append circuit operations, compile the circuit (if applicable),
and perform the quantum computation.
Where relevant, devices that generate their own samples (such as hardware) should
overwrite the following methods:
* :meth:`~.generate_samples`: Generate samples from the device from the
exact or approximate probability distribution.
This device contains common utility methods for qubit-based devices. These
do not need to be overwritten. Utility methods include:
* :meth:`~.expval`, :meth:`~.var`, :meth:`~.sample`: return expectation values,
variances, and samples of observables after the circuit has been rotated
into the observable eigenbasis.
Args:
wires (int): number of subsystems in the quantum state represented by the device
shots (int): number of circuit evaluations/random samples used to estimate
expectation values of observables
analytic (bool): If ``True``, the device calculates probability, expectation values,
and variances analytically. If ``False``, a finite number of samples set by
the argument ``shots`` are used to estimate these quantities.
"""
# pylint: disable=too-many-public-methods
_asarray = staticmethod(np.asarray)
def __init__(self, wires=1, shots=1000, analytic=True):
super().__init__(wires=wires, shots=shots)
self.analytic = analytic
"""bool: If ``True``, the device supports exact calculation of expectation
values, variances, and probabilities. If ``False``, samples are used
to estimate the statistical quantities above."""
self._wires_measured = set()
"""set[int]: wires acted on by quantum operations and observables"""
self._samples = None
"""None or array[int]: stores the samples generated by the device
*after* rotation to diagonalize the observables."""
@classmethod
def capabilities(cls):
"""Get the capabilities of the plugin.
Devices that inherit from this class automatically
have the following items in their capabilities
dictionary:
* ``"model": "qubit"``
* ``"tensor_observables": True``
Returns:
dict[str->*]: results
"""
capabilities = cls._capabilities
capabilities.update(model="qubit", tensor_observables=True)
return capabilities
def reset(self):
"""Reset the backend state.
After the reset, the backend should be as if it was just constructed.
Most importantly the quantum state is reset to its initial value.
"""
self._wires_measured = set()
self._samples = None
def execute(self, circuit):
"""Execute a queue of quantum operations on the device and then
measure the given observables.
For plugin developers: instead of overwriting this, consider
implementing a suitable subset of
* :meth:`apply`
* :meth:`~.generate_samples`
* :meth:`~.probability`
Args:
circuit (~.CircuitGraph): circuit to execute on the device
Raises:
QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported
Returns:
array[float]: measured value(s)
"""
self.check_validity(circuit.operations, circuit.observables)
# apply all circuit operations
self.apply(circuit.operations, circuit.diagonalizing_gates)
# determine the wires that are measured by the circuit
self._wires_measured = QubitDevice.active_wires(circuit.observables)
# generate computational basis samples
if (not self.analytic) or circuit.is_sampled:
self.generate_samples()
# compute the required statistics
results = self.statistics(circuit.observables)
# Ensures that a combination with sample does not put
# expvals and vars in superfluous arrays
all_sampled = all(obs.return_type is Sample for obs in circuit.observables)
if circuit.is_sampled and not all_sampled:
return self._asarray(results, dtype="object")
return self._asarray(results)
@abc.abstractmethod
def apply(self, operations, rotations=None, **kwargs):
"""Apply quantum operations, rotate the circuit into the measurement
basis, and compile and execute the quantum circuit.
        This method receives a list of quantum operations queued by the QNode,
and should be responsible for:
* Constructing the quantum program
* (Optional) Rotating the quantum circuit using the rotation
operations provided. This diagonalizes the circuit so that arbitrary
observables can be measured in the computational basis.
* Compile the circuit
* Execute the quantum circuit
Both arguments are provided as lists of PennyLane :class:`~.Operation`
instances. Useful properties include :attr:`~.Operation.name`,
:attr:`~.Operation.wires`, and :attr:`~.Operation.parameters`:
>>> op = qml.RX(0.2, wires=[0])
>>> op.name # returns the operation name
"RX"
>>> op.wires # returns a list of wires
[0]
>>> op.parameters # returns a list of parameters
[0.2]
Args:
operations (list[~.Operation]): operations to apply to the device
rotations (list[~.Operation]): operations that rotate the circuit
pre-measurement into the eigenbasis of the observables.
"""
@staticmethod
def active_wires(operators):
"""Returns the wires acted on by a set of operators.
Args:
operators (list[~.Operation]): operators for which
we are gathering the active wires
Returns:
set[int]: the set of wires activated by the specified operators
"""
wires = []
for op in operators:
for wire in op.wires:
if isinstance(wire, int):
wires.append(wire)
else:
wires.extend(wire)
return set(wires)
def statistics(self, observables):
"""Process measurement results from circuit execution and return statistics.
This includes returning expectation values, variance, samples and probabilities.
Args:
observables (List[:class:`Observable`]): the observables to be measured
Raises:
QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported
Returns:
Union[float, List[float]]: the corresponding statistics
"""
results = []
for obs in observables:
# Pass instances directly
if obs.return_type is Expectation:
results.append(self.expval(obs))
elif obs.return_type is Variance:
results.append(self.var(obs))
elif obs.return_type is Sample:
results.append(np.array(self.sample(obs)))
elif obs.return_type is Probability:
results.append(self.probability(wires=obs.wires))
elif obs.return_type is not None:
raise QuantumFunctionError(
"Unsupported return type specified for observable {}".format(obs.name)
)
return results
def generate_samples(self):
"""Generate computational basis samples.
If the device contains a sample return type, or the
device is running in non-analytic mode, ``dev.shots`` number of
computational basis samples are generated and stored within
the :attr:`~._samples` attribute.
.. warning::
This method should be overwritten on devices that
generate their own computational basis samples.
"""
number_of_states = 2 ** len(self._wires_measured)
rotated_prob = self.probability(self._wires_measured)
samples = self.sample_basis_states(number_of_states, rotated_prob)
self._samples = QubitDevice.states_to_binary(samples, number_of_states)
def sample_basis_states(self, number_of_states, state_probability):
"""Sample from the computational basis states based on the state
probability.
This is an auxiliary method to the generate_samples method.
Args:
            number_of_states (int): the number of basis states to sample from
            state_probability (array[float]): the computational basis probability vector
Returns:
List[int]: the sampled basis states
"""
basis_states = np.arange(number_of_states)
return np.random.choice(basis_states, self.shots, p=state_probability)
@staticmethod
def states_to_binary(samples, number_of_states):
"""Convert basis states from base 10 to binary representation.
This is an auxiliary method to the generate_samples method.
Args:
samples (List[int]): samples of basis states in base 10 representation
number_of_states (int): the number of basis states to sample from
Returns:
List[int]: basis states in binary representation
"""
powers_of_two = 1 << np.arange(number_of_states)
states_sampled_base_ten = samples[:, None] & powers_of_two
return (states_sampled_base_ten > 0).astype(int)
@property
def state(self):
"""Returns the state vector of the circuit prior to measurement.
.. note::
Only state vector simulators support this property. Please see the
plugin documentation for more details.
"""
raise NotImplementedError
@abc.abstractmethod
def probability(self, wires=None):
"""Return the (marginal) probability of each computational basis
state from the last run of the device.
If no wires are specified, then all the basis states representable by
the device are considered and no marginalization takes place.
Args:
wires (Sequence[int]): Sequence of wires to return
marginal probabilities for. Wires not provided
are traced out of the system.
Returns:
List[float]: list of the probabilities
"""
def marginal_prob(self, prob, wires=None):
"""Return the marginal probability of the computational basis
        states by summing the probabilities on the non-specified wires.
If no wires are specified, then all the basis states representable by
the device are considered and no marginalization takes place.
Args:
prob: The probabilities to return the marginal probabilities
for
wires (Sequence[int]): Sequence of wires to return
marginal probabilities for. Wires not provided
are traced out of the system.
Returns:
array[float]: array of the resulting marginal probabilities.
"""
wires = list(wires or range(self.num_wires))
wires = np.hstack(wires)
inactive_wires = list(set(range(self.num_wires)) - set(wires))
prob = prob.reshape([2] * self.num_wires)
return np.apply_over_axes(np.sum, prob, inactive_wires).flatten()
def expval(self, observable):
wires = observable.wires
if self.analytic:
# exact expectation value
eigvals = observable.eigvals
prob = self.probability(wires=wires)
return (eigvals @ prob).real
# estimate the ev
return np.mean(self.sample(observable))
def var(self, observable):
wires = observable.wires
if self.analytic:
# exact variance value
eigvals = observable.eigvals
prob = self.probability(wires=wires)
return (eigvals ** 2) @ prob - (eigvals @ prob).real ** 2
# estimate the variance
return np.var(self.sample(observable))
def sample(self, observable):
wires = observable.wires
name = observable.name
if isinstance(name, str) and name in {"PauliX", "PauliY", "PauliZ", "Hadamard"}:
# Process samples for observables with eigenvalues {1, -1}
return 1 - 2 * self._samples[:, wires[0]]
# Replace the basis state in the computational basis with the correct eigenvalue.
# Extract only the columns of the basis samples required based on ``wires``.
wires = np.hstack(wires)
samples = self._samples[:, np.array(wires)]
unraveled_indices = [2] * len(wires)
indices = np.ravel_multi_index(samples.T, unraveled_indices)
return observable.eigvals[indices]
| 36.963636 | 104 | 0.651676 |
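The sampling path described in the QubitDevice methods above — draw basis-state labels from the probability vector, expand them to bit strings, then contract the observable's eigenvalue spectrum against either the exact probabilities or the sampled labels — can be reproduced with plain NumPy. A minimal sketch; the probability vector, shot count, and eigenvalue spectrum below are made-up illustration values, not taken from any real device.
import numpy as np
prob = np.array([0.5, 0.1, 0.1, 0.3])       # hypothetical 2-wire basis-state probabilities
eigvals = np.array([1.0, -1.0, -1.0, 1.0])  # hypothetical observable spectrum over the same basis
shots, number_of_states = 1000, 4
# sample_basis_states: draw integer basis-state labels according to prob
samples = np.random.choice(np.arange(number_of_states), shots, p=prob)
# states_to_binary: expand each base-10 label into a row of bits
powers_of_two = 1 << np.arange(number_of_states)
binary_samples = ((samples[:, None] & powers_of_two) > 0).astype(int)
# analytic expectation value: spectrum contracted with the exact probabilities
analytic_expval = (eigvals @ prob).real
# estimated expectation value: average the eigenvalue assigned to each sampled label
estimated_expval = np.mean(eigvals[samples])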
79426bd58c427c8e4f0fd50831915065d3dfb32e | 18,336 | py | Python | discovery-provider/src/eth_indexing/event_scanner.py | atticwip/audius-protocol | 9758e849fae01508fa1d27675741228b11533e6e | [
"Apache-2.0"
] | 4 | 2021-12-24T14:16:05.000Z | 2022-01-13T22:41:33.000Z | discovery-provider/src/eth_indexing/event_scanner.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | null | null | null | discovery-provider/src/eth_indexing/event_scanner.py | SNOmad1/audius-protocol | 3d5fc2bf688265eb529060f1f3234ef2b95ed231 | [
"Apache-2.0"
] | null | null | null | import datetime
import time
import logging
from typing import Tuple, Iterable, Union, Type, TypedDict, Any
from sqlalchemy import or_
from web3 import Web3
from web3.contract import Contract, ContractEvent
from web3.exceptions import BlockNotFound
from web3.types import BlockIdentifier
# Currently this method is not exposed over official web3 API,
# but we need it to construct eth_get_logs parameters
from web3._utils.filters import construct_event_filter_params
from web3._utils.events import get_event_data
from eth_abi.codec import ABICodec
from src.models.models import AssociatedWallet, EthBlock, User
from src.utils.helpers import redis_set_and_dump, redis_get_or_restore
from src.queries.get_balances import enqueue_immediate_balance_refresh
logger = logging.getLogger(__name__)
eth_indexing_last_scanned_block_key = "eth_indexing_last_scanned_block"
# How many times we try to re-attempt a failed JSON-RPC call
MAX_REQUEST_RETRIES = 30
# Delay between failed requests to let the JSON-RPC server recover
REQUEST_RETRY_SECONDS = 3
# Minimum number of blocks to scan for our JSON-RPC throttling parameters
MIN_SCAN_CHUNK_SIZE = 10
# How many maximum blocks at a time we request from JSON-RPC
# and we are unlikely to exceed the response size limit of the JSON-RPC server
MAX_CHUNK_SCAN_SIZE = 10000
# Factor by which we increase the chunk size if no results are found
CHUNK_SIZE_INCREASE = 2
# initial number of blocks to scan, this number will increase/decrease as a function of whether transfer events have been found within the range of blocks scanned
START_CHUNK_SIZE = 20
# how many blocks from tail of chain we want to scan to
ETH_BLOCK_TAIL_OFFSET = 1
# the block number to start with if first time scanning
# this should be the first block during and after which $AUDIO transfer events started occurring
MIN_SCAN_START_BLOCK = 11103292
class TransferEvent(TypedDict):
logIndex: int
transactionHash: Any
blockNumber: int
args: Any
class EventScanner:
"""Scan blockchain for events and try not to abuse JSON-RPC API too much.
Can be used for real-time scans, as it detects minor chain reorganisation and rescans.
Unlike the easy web3.contract.Contract, this scanner can scan events from multiple contracts at once.
For example, you can get all transfers from all tokens in the same scan.
You *should* disable the default `http_retry_request_middleware` on your provider for Web3,
because it cannot correctly throttle and decrease the `eth_get_logs` block number range.
"""
def __init__(
self,
db,
redis,
web3: Web3,
contract: Type[Contract],
event_type: Type[ContractEvent],
filters: dict,
):
"""
:param db: database handle
:param redis: redis handle
:param web3: Web3 instantiated with provider url
:param contract: Contract
:param state: state manager to keep tracks of last scanned block and persisting events to db
:param event_type: web3 Event we scan
:param filters: Filters passed to get_logs e.g. { "address": <token-address> }
"""
self.logger = logger
self.db = db
self.redis = redis
self.contract = contract
self.web3 = web3
self.event_type = event_type
self.filters = filters
self.last_scanned_block = MIN_SCAN_START_BLOCK
self.latest_chain_block = self.web3.eth.blockNumber
def restore(self):
"""Restore the last scan state from redis.
If value not found in redis, restore from database."""
restored = redis_get_or_restore(self.redis, eth_indexing_last_scanned_block_key)
if not restored:
with self.db.scoped_session() as session:
result = session.query(EthBlock.last_scanned_block).first()
restored = result[0] if result else restored
self.last_scanned_block = int(restored) if restored else MIN_SCAN_START_BLOCK
logger.info(
f"event_scanner.py | Restored last scanned block ({self.last_scanned_block})"
)
def save(self, block_number: int):
"""Save at the end of each chunk of blocks, so we can resume in the case of a crash or CTRL+C
Next time the scanner is started we will resume from this block
"""
self.last_scanned_block = block_number
logger.info(
f"event_scanner.py | Saving last scanned block ({self.last_scanned_block}) to redis"
)
redis_set_and_dump(
self.redis,
eth_indexing_last_scanned_block_key,
str(self.last_scanned_block),
)
with self.db.scoped_session() as session:
record = session.query(EthBlock).first()
if record:
record.last_scanned_block = self.last_scanned_block
else:
record = EthBlock(last_scanned_block=self.last_scanned_block)
session.add(record)
def get_block_timestamp(self, block_num) -> Union[datetime.datetime, None]:
"""Get Ethereum block timestamp"""
try:
block_info = self.web3.eth.getBlock(block_num)
except BlockNotFound:
# Block was not mined yet,
# minor chain reorganisation?
return None
last_time = block_info["timestamp"]
return datetime.datetime.utcfromtimestamp(last_time)
def get_suggested_scan_end_block(self):
"""Get the last mined block on Ethereum chain we are following."""
# Do not scan all the way to the final block, as this
# block might not be mined yet
return self.latest_chain_block - ETH_BLOCK_TAIL_OFFSET
def get_last_scanned_block(self) -> int:
"""The number of the last block we have stored."""
return self.last_scanned_block
def process_event(
self, block_timestamp: datetime.datetime, event: TransferEvent
) -> str:
"""Record a ERC-20 transfer in our database."""
# Events are keyed by their transaction hash and log index
# One transaction may contain multiple events
# and each one of those gets their own log index
log_index = event["logIndex"] # Log index within the block
# transaction_index = event.transactionIndex # Transaction index within the block
txhash = event["transactionHash"].hex() # Transaction hash
block_number = event["blockNumber"]
# Convert ERC-20 Transfer event to our internal format
args = event["args"]
transfer = {
"from": args["from"],
"to": args["to"],
"value": args["value"],
"timestamp": block_timestamp,
}
# add user ids from the transfer event into the balance refresh queue
transfer_event_wallets = [transfer["from"].lower(), transfer["to"].lower()]
with self.db.scoped_session() as session:
result = (
session.query(User.user_id)
.outerjoin(AssociatedWallet, User.user_id == AssociatedWallet.user_id)
.filter(User.is_current == True)
.filter(AssociatedWallet.is_current == True)
.filter(AssociatedWallet.is_delete == False)
.filter(
or_(
User.wallet.in_(transfer_event_wallets),
AssociatedWallet.wallet.in_(transfer_event_wallets),
)
)
.all()
)
user_ids = [user_id for [user_id] in result]
enqueue_immediate_balance_refresh(self.redis, user_ids)
# Return a pointer that allows us to look up this event later if needed
return f"{block_number}-{txhash}-{log_index}"
def scan_chunk(self, start_block, end_block) -> Tuple[int, list]:
"""Read and process events between to block numbers.
Dynamically decrease the size of the chunk in case the JSON-RPC server pukes out.
:return: tuple(actual end block number, when this block was mined, processed events)
"""
block_timestamps = {}
get_block_timestamp = self.get_block_timestamp
# Cache block timestamps to reduce some RPC overhead
# Real solution might include smarter models around block
def get_block_mined_timestamp(block_num):
if block_num not in block_timestamps:
block_timestamps[block_num] = get_block_timestamp(block_num)
return block_timestamps[block_num]
all_processed = []
# Callable that takes care of the underlying web3 call
def _fetch_events(from_block, to_block):
return _fetch_events_for_all_contracts(
self.web3,
self.event_type,
self.filters,
from_block=from_block,
to_block=to_block,
)
# Do `n` retries on `eth_get_logs`,
# throttle down block range if needed
end_block, events = _retry_web3_call(
_fetch_events, start_block=start_block, end_block=end_block
)
for evt in events:
            idx = evt["logIndex"]  # Integer of the log index position in the block; null when it's pending
# We cannot avoid minor chain reorganisations, but
# at least we must avoid blocks that are not mined yet
assert idx is not None, "Somehow tried to scan a pending block"
block_number = evt["blockNumber"]
# Get UTC time when this event happened (block mined timestamp)
# from our in-memory cache
block_timestamp = get_block_mined_timestamp(block_number)
logger.debug(
f'event_scanner.py | Processing event {evt["event"]}, block:{evt["blockNumber"]}'
)
processed = self.process_event(block_timestamp, evt)
all_processed.append(processed)
return end_block, all_processed
def estimate_next_chunk_size(self, current_chuck_size: int, event_found_count: int):
"""Try to figure out optimal chunk size
Our scanner might need to scan the whole blockchain for all events
* We want to minimize API calls over empty blocks
        * We want to make sure that one scan chunk does not try to process too many entries at once, as we try to control commit buffer size and potentially asynchronous busy loop
* Do not overload node serving JSON-RPC API by asking data for too many events at a time
        Currently Ethereum JSON-API does not have an API to tell when a first event occurred in a blockchain
and our heuristics try to accelerate block fetching (chunk size) until we see the first event.
        These heuristics exponentially increase the scan chunk size depending on whether we are seeing events or not.
When any transfers are encountered, we are back to scanning only a few blocks at a time.
It does not make sense to do a full chain scan starting from block 1, doing one JSON-RPC call per 20 blocks.
"""
if event_found_count > 0:
# When we encounter first events, reset the chunk size window
current_chuck_size = MIN_SCAN_CHUNK_SIZE
else:
current_chuck_size *= CHUNK_SIZE_INCREASE
current_chuck_size = max(MIN_SCAN_CHUNK_SIZE, current_chuck_size)
current_chuck_size = min(MAX_CHUNK_SCAN_SIZE, current_chuck_size)
return int(current_chuck_size)
def scan(
self,
start_block,
end_block,
start_chunk_size=START_CHUNK_SIZE,
) -> Tuple[list, int]:
"""Perform a token events scan.
:param start_block: The first block included in the scan
:param end_block: The last block included in the scan
:param start_chunk_size: How many blocks we try to fetch over JSON-RPC on the first attempt
:return: [All processed events, number of chunks used]
"""
current_block = start_block
# Scan in chunks, commit between
chunk_size = start_chunk_size
last_scan_duration = last_logs_found = 0
total_chunks_scanned = 0
# All processed entries we got on this scan cycle
all_processed = []
while current_block <= end_block:
# Print some diagnostics to logs to try to fiddle with real world JSON-RPC API performance
estimated_end_block = min(
current_block + chunk_size, self.get_suggested_scan_end_block()
)
logger.debug(
"event_scanner.py | Scanning token transfers for blocks: %d - %d, chunk size %d, last chunk scan took %f, last logs found %d",
current_block,
estimated_end_block,
chunk_size,
last_scan_duration,
last_logs_found,
)
start = time.time()
actual_end_block, new_entries = self.scan_chunk(
current_block, estimated_end_block
)
            # Where our current chunk scan ends - are we out of chain yet?
current_end = actual_end_block
last_scan_duration = int(time.time() - start)
all_processed += new_entries
# Try to guess how many blocks to fetch over `eth_get_logs` API next time
chunk_size = self.estimate_next_chunk_size(chunk_size, len(new_entries))
# Set where the next chunk starts
current_block = current_end + 1
total_chunks_scanned += 1
self.save(min(current_end, self.get_suggested_scan_end_block()))
return all_processed, total_chunks_scanned
def _retry_web3_call( # type: ignore
func,
start_block,
end_block,
retries=MAX_REQUEST_RETRIES,
delay=REQUEST_RETRY_SECONDS,
) -> Tuple[int, list]: # type: ignore
"""A custom retry loop to throttle down block range.
If our JSON-RPC server cannot serve all incoming `eth_get_logs` in a single request,
we retry and throttle down block range for every retry.
For example, Go Ethereum does not indicate what is an acceptable response size.
It just fails on the server-side with a "context was cancelled" warning.
:param func: A callable that triggers Ethereum JSON-RPC, as func(start_block, end_block)
:param start_block: The initial start block of the block range
    :param end_block: The initial end block of the block range
:param retries: How many times we retry
:param delay: Time to sleep between retries
"""
for i in range(retries):
try:
return end_block, func(start_block, end_block)
except Exception as e:
# Assume this is HTTPConnectionPool(host='localhost', port=8545): Read timed out. (read timeout=10)
# from Go Ethereum. This translates to the error "context was cancelled" on the server side:
# https://github.com/ethereum/go-ethereum/issues/20426
if i < retries - 1:
# Give some more verbose info than the default middleware
logger.warning(
"event_scanner.py | Retrying events for block range %d - %d (%d) failed with %s, retrying in %s seconds",
start_block,
end_block,
end_block - start_block,
e,
delay,
)
                # Decrease the `eth_get_logs` range
end_block = start_block + ((end_block - start_block) // 2)
# Let the JSON-RPC to recover e.g. from restart
time.sleep(delay)
continue
else:
logger.warning("event_scanner.py | Out of retries")
raise
def _fetch_events_for_all_contracts(
web3,
event_type,
argument_filters: dict,
from_block: BlockIdentifier,
to_block: BlockIdentifier,
) -> Iterable:
"""Get events using eth_get_logs API.
This method is detached from any contract instance.
This is a stateless method, as opposed to createFilter.
It can be safely called against nodes which do not provide `eth_newFilter` API, like Infura.
"""
if from_block is None:
raise TypeError("Missing mandatory keyword argument to get_logs: fromBlock")
# Currently no way to poke this using a public Web3.py API.
# This will return raw underlying ABI JSON object for the event
abi = event_type._get_event_abi()
# Depending on the Solidity version used to compile
# the contract that uses the ABI,
# it might have Solidity ABI encoding v1 or v2.
# We just assume the default that you set on Web3 object here.
# More information here https://eth-abi.readthedocs.io/en/latest/index.html
codec: ABICodec = web3.codec
# Here we need to poke a bit into Web3 internals, as this
# functionality is not exposed by default.
# Construct JSON-RPC raw filter presentation based on human readable Python descriptions
# Namely, convert event names to their keccak signatures
# More information here:
# https://github.com/ethereum/web3.py/blob/e176ce0793dafdd0573acc8d4b76425b6eb604ca/web3/_utils/filters.py#L71
_, event_filter_params = construct_event_filter_params(
abi,
codec,
address=argument_filters.get("address"),
argument_filters=argument_filters,
fromBlock=from_block,
toBlock=to_block,
)
logger.debug(
"event_scanner.py | Querying eth_get_logs with the following parameters: %s",
event_filter_params,
)
# Call JSON-RPC API on your Ethereum node.
# get_logs() returns raw AttributedDict entries
logs = web3.eth.getLogs(event_filter_params)
# Convert raw binary data to Python proxy objects as described by ABI
all_events = []
for log in logs:
# Convert raw JSON-RPC log result to human readable event by using ABI data
# More information how processLog works here
# https://github.com/ethereum/web3.py/blob/fbaf1ad11b0c7fac09ba34baff2c256cffe0a148/web3/_utils/events.py#L200
event = get_event_data(codec, abi, log)
all_events.append(event)
return all_events
| 40.476821 | 176 | 0.660013 |
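The two throttling ideas that EventScanner combines — growing the block window geometrically while no transfers are seen, and halving the requested range whenever the node fails — can be isolated in a few lines. This is an illustrative sketch using the module's constants; next_chunk_size is a stand-in name, not part of the original file.
MIN_SCAN_CHUNK_SIZE, MAX_CHUNK_SCAN_SIZE, CHUNK_SIZE_INCREASE = 10, 10000, 2
def next_chunk_size(current, events_found):
    # Drop back to a small window as soon as events appear; otherwise grow geometrically.
    current = MIN_SCAN_CHUNK_SIZE if events_found else current * CHUNK_SIZE_INCREASE
    return int(min(MAX_CHUNK_SCAN_SIZE, max(MIN_SCAN_CHUNK_SIZE, current)))
sizes, chunk = [], 20
for _ in range(5):  # walking a quiet stretch of chain
    sizes.append(chunk)
    chunk = next_chunk_size(chunk, events_found=0)
# sizes == [20, 40, 80, 160, 320]; on a node failure the scanner instead halves the range:
start_block, end_block = 1_000_000, 1_010_000
end_block = start_block + ((end_block - start_block) // 2)  # retry with 5,000 blocks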
79426c4fbdda41af4238f10987aa941ddb5f7585 | 3,846 | py | Python | src/virtual_io_server/volume_group/ModifyVolumeGroup.py | Niraj-Shah-1/HmcRestClient-1.0 | 529002588cd9bf5313f1c8ed2948ab96e648d45c | [
"Apache-2.0"
] | 21 | 2015-04-23T06:26:20.000Z | 2022-03-23T16:16:32.000Z | src/virtual_io_server/volume_group/ModifyVolumeGroup.py | Niraj-Shah-1/HmcRestClient-1.0 | 529002588cd9bf5313f1c8ed2948ab96e648d45c | [
"Apache-2.0"
] | 7 | 2015-06-17T15:13:15.000Z | 2020-09-18T00:47:04.000Z | src/virtual_io_server/volume_group/ModifyVolumeGroup.py | Niraj-Shah-1/HmcRestClient-1.0 | 529002588cd9bf5313f1c8ed2948ab96e648d45c | [
"Apache-2.0"
] | 13 | 2015-06-17T09:48:20.000Z | 2021-03-15T12:09:22.000Z | # Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from src.utility import HTTPClient
from src.generated_src import UOM
import pyxb
ROOT = "VirtualIOServer"
CONTENT_TYPE = "application/vnd.ibm.powervm.uom+xml; type=VolumeGroup"
LOGICALVOLUME_NAME = "lv_1"
PHYSICALVOLUME_NAME = "hdisk7"
SCHEMA_VERSION = "V1_3_0"
def sendrequest(xml, ip, root, content_type, x_api_session, vios_uuid, volumegroup_id):
"""
performs the HTTPPost request with modified volume group
"""
http_object = HTTPClient.HTTPClient("uom", ip, root, content_type, x_api_session)
http_object.HTTPPost(xml,append=vios_uuid+"/VolumeGroup/"+volumegroup_id)
if http_object.response_b:
return True
else:
return False
class ModifyVolumeGroup:
def __init__(self):
"""
initializes root and content-type
"""
self.root = ROOT
self.content_type = CONTENT_TYPE
def add_physicalvolume(self, ip, vios_uuid, x_api_session, volumegroup_object):
"""
adds a Physical volume to the selected volume group
Args:
ip:ip address of hmc
vios_uuid:UUID of VirtualIOServer
x_api_session:session to be used
volumegroup_object:volume group object to be modified
"""
pyxb.RequireValidWhenGenerating(True)
physicalvolume_object = UOM.PhysicalVolume()
physicalvolume_object.VolumeName = PHYSICALVOLUME_NAME
physicalvolume_object.schemaVersion = SCHEMA_VERSION
volumegroup_object.PhysicalVolumes.PhysicalVolume.append(physicalvolume_object)
xml = volumegroup_object.toxml()
volumegroup_id = volumegroup_object.Metadata.Atom.AtomID.value()
response = sendrequest(xml, ip, self.root, self.content_type, x_api_session, vios_uuid, volumegroup_id)
if response :
print("Physical volume added to volumegroup Successfully")
else:
print("Adding Physical volume to volumegroup failed")
def add_virtualdisk(self, ip, vios_uuid, x_api_session, volumegroup_object):
"""
creates a virtualdisk in VolumeGroup
Args:
ip:ip address of hmc
vios_uuid:UUID of VirtualIOServer
x_api_session:session to be used
volumegroup_object:volume group object to be modified
"""
pyxb.RequireValidWhenGenerating(True)
virtualdisk_object = UOM.VirtualDisk()
virtualdisk_object.DiskName = LOGICALVOLUME_NAME
virtualdisk_object.schemaVersion = SCHEMA_VERSION
virtualdisk_object.DiskCapacity = volumegroup_object.GroupCapacity.value()/2
volumegroup_object.VirtualDisks.VirtualDisk.append(virtualdisk_object)
xml = volumegroup_object.toxml()
volumegroup_id = volumegroup_object.Metadata.Atom.AtomID.value()
response = sendrequest(xml, ip, self.root, self.content_type, x_api_session, vios_uuid, volumegroup_id)
if response :
print("VirtualDisk Created in VolumeGroup Successfully")
else:
print("VirtualDisk Creation unsuccessfull")
| 41.804348 | 115 | 0.674207 |
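A hedged usage sketch for the class above: the HMC address, session token, VIOS UUID, and the helper that fetches the VolumeGroup object are placeholders for whatever the caller already has; only ModifyVolumeGroup and its two methods come from this module.
modify = ModifyVolumeGroup()
# get_volumegroup_object() is an assumed helper returning a parsed UOM VolumeGroup object
volumegroup_object = get_volumegroup_object(hmc_ip, vios_uuid, x_api_session)
# Attach hdisk7 (PHYSICALVOLUME_NAME) to the group, then carve lv_1 at half the group capacity
modify.add_physicalvolume(hmc_ip, vios_uuid, x_api_session, volumegroup_object)
modify.add_virtualdisk(hmc_ip, vios_uuid, x_api_session, volumegroup_object)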
79426daf099d673a1f5a3cc3e136325015387acb | 1,500 | py | Python | app/recipe/serializers.py | theonlysam/recipe-app-api | bb840796dc9b5a9760fd6e2c29e2ae3a0a26e92e | [
"MIT"
] | null | null | null | app/recipe/serializers.py | theonlysam/recipe-app-api | bb840796dc9b5a9760fd6e2c29e2ae3a0a26e92e | [
"MIT"
] | null | null | null | app/recipe/serializers.py | theonlysam/recipe-app-api | bb840796dc9b5a9760fd6e2c29e2ae3a0a26e92e | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""
Serializer for tag objects
"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""
Serializer for ingredient objects
"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""
Serializer for recipe objects
"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
        queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = (
'id', 'title', 'ingredients', 'tags', 'time_minutes',
'price', 'link'
)
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""
Serializer for a recipe detail
"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""
Serializer for uploading images to recipes
"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',) | 23.4375 | 65 | 0.623333 |
79426e030fd5a5dc8b650b319715cdd0ee9e46fc | 4,835 | py | Python | chatterbot/logic/logic_adapter.py | ingeniousambivert/chatbot | fb1d9659df6c1b6eddd8ee9349f5a65a0530db2a | [
"BSD-3-Clause"
] | null | null | null | chatterbot/logic/logic_adapter.py | ingeniousambivert/chatbot | fb1d9659df6c1b6eddd8ee9349f5a65a0530db2a | [
"BSD-3-Clause"
] | null | null | null | chatterbot/logic/logic_adapter.py | ingeniousambivert/chatbot | fb1d9659df6c1b6eddd8ee9349f5a65a0530db2a | [
"BSD-3-Clause"
] | null | null | null | from chatterbot.adapters import Adapter
from chatterbot.storage import StorageAdapter
from chatterbot.search import IndexedTextSearch
from chatterbot.conversation import Statement
class LogicAdapter(Adapter):
"""
This is an abstract class that represents the interface
that all logic adapters should implement.
:param search_algorithm_name: The name of the search algorithm that should
be used to search for close matches to the provided input.
Defaults to the value of ``Search.name``.
:param maximum_similarity_threshold:
The maximum amount of similarity between two statement that is required
before the search process is halted. The search for a matching statement
will continue until a statement with a greater than or equal similarity
is found or the search set is exhausted.
Defaults to 0.95
:param response_selection_method:
          The response selection method.
Defaults to ``get_first_response``
:type response_selection_method: collections.abc.Callable
:param default_response:
          The default response returned by this logic adapter
if there is no other possible response to return.
:type default_response: str or list or tuple
"""
def __init__(self, chatbot, **kwargs):
super().__init__(chatbot, **kwargs)
from chatterbot.response_selection import get_first_response
self.search_algorithm_name = kwargs.get(
'search_algorithm_name',
IndexedTextSearch.name
)
self.search_algorithm = self.chatbot.search_algorithms[
self.search_algorithm_name
]
self.maximum_similarity_threshold = kwargs.get(
'maximum_similarity_threshold', 0.95
)
# By default, select the first available response
self.select_response = kwargs.get(
'response_selection_method',
get_first_response
)
default_responses = kwargs.get('default_response', [])
# Convert a single string into a list
if isinstance(default_responses, str):
default_responses = [
default_responses
]
self.default_responses = [
Statement(text=default) for default in default_responses
]
def can_process(self, statement):
"""
A preliminary check that is called to determine if a
logic adapter can process a given statement. By default,
this method returns true but it can be overridden in
child classes as needed.
:rtype: bool
"""
return True
def process(self, statement, additional_response_selection_parameters=None):
"""
Override this method and implement your logic for selecting a response to an input statement.
A confidence value and the selected response statement should be returned.
The confidence value represents a rating of how accurate the logic adapter
expects the selected response to be. Confidence scores are used to select
the best response from multiple logic adapters.
The confidence value should be a number between 0 and 1 where 0 is the
lowest confidence level and 1 is the highest.
:param statement: An input statement to be processed by the logic adapter.
:type statement: Statement
:param additional_response_selection_parameters: Parameters to be used when
filtering results to choose a response from.
:type additional_response_selection_parameters: dict
:rtype: Statement
"""
raise self.AdapterMethodNotImplementedError()
def get_default_response(self, input_statement):
"""
This method is called when a logic adapter is unable to generate any
other meaningful response.
"""
from random import choice
if self.default_responses:
response = choice(self.default_responses)
else:
try:
response = self.chatbot.storage.get_random()
except StorageAdapter.EmptyDatabaseException:
response = input_statement
self.chatbot.logger.info(
'No known response to the input was found. Selecting a random response.'
)
# Set confidence to zero because a random response is selected
response.confidence = 0
return response
@property
def class_name(self):
"""
Return the name of the current logic adapter class.
This is typically used for logging and debugging.
"""
return str(self.__class__.__name__)
| 36.08209 | 102 | 0.651706 |
79426e0bbe7e4fc0ed0f4a115ae78da0882a3752 | 25,924 | py | Python | MappingGlobalCarbon/gfw_forestlearn/fl_regression.py | forc-db/GROA | a316bd6c70d02a8904a19fc554747acfadbae0c1 | [
"CC-BY-4.0"
] | 20 | 2020-09-27T07:37:19.000Z | 2022-02-19T13:26:47.000Z | MappingGlobalCarbon/gfw_forestlearn/fl_regression.py | forc-db/GROA | a316bd6c70d02a8904a19fc554747acfadbae0c1 | [
"CC-BY-4.0"
] | 2 | 2020-09-18T11:08:37.000Z | 2021-03-25T11:16:22.000Z | MappingGlobalCarbon/gfw_forestlearn/fl_regression.py | forc-db/GROA | a316bd6c70d02a8904a19fc554747acfadbae0c1 | [
"CC-BY-4.0"
] | 9 | 2020-11-12T01:49:16.000Z | 2022-02-22T00:54:02.000Z | import subprocess
import gdal
import pandas as pd
import numpy as np
import glob
import rasterio
import os
import datetime
import csv
import random
from math import sqrt
import pickle
import math
import datetime
import warnings
from shutil import copyfile
import csv
import sys
import math
import shutil
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn import svm
from sklearn.model_selection import GridSearchCV, StratifiedKFold, validation_curve, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVR
from sklearn.neural_network import MLPClassifier
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
import xgboost as xgb
class ForestLearn(object):
"""
Build machine learning object that can find the best parameters for final run.
"""
def __init__(self, predictors=[], y_column=None, xy = [], cat_feats = [], one_hot_feats = []):
"""
Defines predictors, response variable, coordinate column names, binary features, categorical features, and numeric features
Inputs:
predictors (list): List of predictor variables to be used in model, these will also be referred to as features
y_column (string): Name of response variable column in dataframes
xy (list): Name of coordinate columns for x, y (or longitude, latitude) positions of pixels
cat_feats (list): List of binary features that will stay (0,1)
one_hot_feats (list): List of categorical features with more than one category, these will be transformed using one hot encoding to binary features.
Numeric features are those that are listed in predictors but not in cat_feats or one_hot_feats.
        Predictors, cat_feats, and one_hot_feats are combined to ensure all features are used and duplicates are removed, so you can list both cat_feats and
        one_hot_feats in predictors or enter them separately.
For example:
predictors= ['rainfall','temperature','biome','protected']
cat_feats = ['protected']
one_hot_feats = ['biome']
OR
predictors= ['rainfall','temperature']
cat_feats = ['protected']
one_hot_feats = ['biome']
are both accepted.
"""
# Exit if y_column is not present
if y_column is None:
sys.exit('"y_column" must be defined in training process...')
# Merge inputted feature lists and remove duplicates
predictors = list(set(predictors + cat_feats + one_hot_feats))
# Select numeric features as predictors that are not in cat_feats or one_hot_feats
numeric_features = [x for x in predictors if x not in cat_feats+one_hot_feats]
# Save parameters to ForestLearn object
self.y_column = y_column
self.xy = xy
self.predictors = predictors
self.numeric_features = numeric_features
self.categorical_features = cat_feats
self.one_hot_features = one_hot_feats
self.best_params = {}
self.rmse = {}
self.r2 = {}
self.avg_res = {}
self.avg_abs_res = {}
def tune_param_set(self, train, params, out_modelfilename, cv_results_filename, k=5, scoring='neg_root_mean_squared_error', n_jobs=4,verbose=1,refit=True):
"""
Given a dictionary of lists of parameters to try, runs GridSearchCV to use cross validation to find the best set of parameters based on the cross
validation score defined in the scoring variable.
Saves the best fitting model to ForestLearn object, outputs model to pickle file, and outputs cross-validation results
See documentation on GridSearchCV: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
Inputs:
train (DataFrame): Dataframe of training data containing predictors and y_column
params (dictionary): Dictionary with parameters names (str) as keys and lists of parameter settings to try as values,
or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored.
This enables searching over any sequence of parameter settings.
out_modelfilename (string): Output file name to save trained model, using pickle, should have extension .pkl
cv_results_filename (string): Output file name to save cross-validation results, should be a csv with extension .csv
k (integer): Integer number for number of folds in cross-validation
scoring (string): Scoring name to evaluate model, see https://scikit-learn.org/stable/modules/model_evaluation.html for options
n_jobs (integer): Integer number of threads (CPUs) to train model, -1 uses all
verbose (integer): Controls the verbosity: the higher, the more messages.
refit (boolean): Refit an estimator using the best found parameters on the whole dataset.
Returns: None
"""
# Define grid_search
grid_search = GridSearchCV(self.mdl, params, n_jobs=n_jobs, verbose=verbose, cv=k, scoring=scoring,
refit=refit, return_train_score=True)
# Fit grid_search
grid_search.fit(train[self.predictors], train[self.y_column])
# Save best parameters
self.best_params = grid_search.best_params_
        # Save best model
self.mdl = grid_search.best_estimator_
# Save best model to out_modelfilename
pickle.dump(self.mdl, open(out_modelfilename, 'wb'))
# Save cv_results to cv_results_filename
cv_results_df = pd.DataFrame.from_dict(grid_search.cv_results_)
cv_results_df.to_csv(cv_results_filename,index=False)
return self.mdl
def fit_model_with_params(self, train, out_modelfilename, in_params=None, in_modelfilename=None):
"""
Given a dictionary of parameters or input model filename to load parameters from trained model, trains a model with inputted parameters
        on training data and saves the model. These parameters should only be for the machine learning part of the pipeline, named "learn".
This step should only be run after setting up model pipeline type, i.e. running setup_xgb_model or setup_rf_model where the
feature scaling and selecting is done.
Inputs:
train (DataFrame): Dataframe of training data containing predictors and y_column
out_modelfilename (string): Output file name to save trained model, using pickle, should have extension .pkl
in_params (dictionary): Dictionary with parameters names (str) as keys and parameter setting as value to train the model
in_modelfilename (string): Input file name of trained model to load parameters, should have extension .pkl
Returns: None
"""
# Exit if in_params or in_modelfilename is not given
if (in_params is None) and (in_modelfilename is None):
sys.exit('Either in_params or in_modelfilename must be provided')
# If in_params is given, load parameters for "learn" machine learning part of the pipeline
elif in_params:
self.mdl.named_steps['learn'].set_params(**in_params)
params = in_params
# If in_modelfilename is given, load parameters from modelfile for "learn" machine learning part of the pipeline
elif in_modelfilename:
in_model = pickle.load(open(in_modelfilename, 'rb'))
params = in_model.named_steps['learn'].get_params()
self.mdl.named_steps['learn'].set_params(**params)
# Fit model on training data in train
self.mdl.fit(train[self.predictors], train[self.y_column])
# Save best parameters
self.best_params = params
# Save to out_modelfilename
pickle.dump(self.mdl, open(out_modelfilename, 'wb'))
return self.mdl
def load_model_from_file(self, in_modelfilename):
"""
Loads inputted model and saves to ForestLearn object
Inputs:
            in_modelfilename (string): Input file name of trained model to load, should have extension .pkl
Returns: None
"""
self.mdl = pickle.load(open(in_modelfilename, 'rb'))
self.best_params = self.mdl.named_steps['learn'].get_params()
return self.mdl
def save_feature_importances(self, feature_importance_filename):
# """
# Saves feature importances from trained model
#
# Inputs:
# feature_importance_filename (string): File name to save feature importances to, should have extension .csv
# """
# # If one_hot_features are listed, grab the new one hot encoded feature names
# # Then the list of parameters is numeric features, one hot encoded features, and categorical features
#
# preprocess_step = list(self.mdl.named_steps.keys())[-2]
# transformer = self.mdl.named_steps[preprocess_step]
# print(transformer)
# if isinstance(transformer,ColumnTransformer):
# print(transformer.transformers_)
# elif isinstance(transformer,SelectFromModel):
# print(transformer.get_support())
#print()
#print(transformers)
# names = [x[1].named_steps for x in transformers]
#
# has_pca = False
# has_one_hot = False
#
# for dictionary in names:
# for value in dictionary.values():
# if isinstance(value,PCA):
# has_pca = True
# if isinstance(value,OneHotEncoder):
# has_one_hot=True
#
# print(self.one_hot_features, has_pca, has_one_hot)
if self.one_hot_features:
try:
ohe = (self.mdl.named_steps['preprocess'].named_transformers_['cat'].named_steps['onehot'])
one_hot_feature_names = ohe.get_feature_names(input_features=self.one_hot_features)
all_feature_names = np.r_[self.numeric_features, one_hot_feature_names, self.categorical_features]
except:
all_feature_names = self.predictors
# Otherwise the features are in order
else:
all_feature_names = self.predictors
print(len(self.mdl.named_steps['learn'].feature_importances_))
# Merge feature importances and names, save to file
# try:
# feature_importances = self.mdl.named_steps['learn'].feature_importances_
# feature_dictionary = {'Feature Names':all_feature_names,'Importances':feature_importances}
# dictionary = pd.DataFrame(feature_dictionary)
# dictionary = dictionary.sort_values(by='Importances', axis=0, ascending=False, na_position='last')
# dictionary.to_csv(feature_importance_filename,index=0)
# except Exception as e:
# print('No feature importances collected, reporting exception: ', e)
def predict_data(self, df, out_file, name, other_columns=[],dropna=True):
"""
        Uses trained model to predict over inputted data that contains the response variable, to assess scores on, e.g., a training, validation, or test set.
Saves coordinates, true response variable, predicted response variable, residual, and copies of other_columns (if included) into out_file
Calculates the RMSE, R-Squared, average residual, and average absolute residual for scores.
Inputs:
df (DataFrame): Dataframe of data to predict over containing predictors and y_column
out_file (string): File name to save data with predictions to
name (string): Name of dataset to save scores using "save_scores" method, examples could be "training", "testing", or "validation"
other_columns (list): Other columns that should also be included, this could be a unique ID of datapoints
dropna (boolean): Whether to remove records with any nan values. If set to False and NaN values are not resolved, this will cause an error.
Returns: None
"""
# Prepare output dataframe with columns
if len(other_columns)>0:
out_df = pd.DataFrame(columns=self.xy+other_columns+['Y_true','Est','Residual'])
else:
out_df = pd.DataFrame(columns=self.xy+['Y_true','Est','Residual'])
out_df.to_csv(out_file,index=False)
        # Remove records with NaN values if dropna is true
if dropna:
df = df.dropna()
# Predict and calculate residual
y_hat = self.mdl.predict(df[self.predictors])
residual = df[self.y_column].values - y_hat
# Create series with data
dfY = pd.Series(df[self.y_column].values, name='Y_true')
dfY_hat = pd.Series(y_hat, name='Est')
dfResidual = pd.Series(residual, name='Residual')
dfCoords = df[self.xy].reset_index(drop=True)
# If other_columns are listed, merge all of this data and output
if len(other_columns)>0:
dfOtherVariables = df[other_columns].reset_index(drop=True)
df0 = pd.concat([dfCoords, dfOtherVariables, dfY, dfY_hat, dfResidual], axis=1)
out_df = pd.DataFrame(df0, columns=self.xy+other_columns+['Y_true','Est','Residual'])
out_df.to_csv(out_file, mode='a', header=False, index=False)
# Otherwise merge all the data and output
else:
df0 = pd.concat([dfCoords, dfY, dfY_hat, dfResidual], axis=1)
out_df = pd.DataFrame(df0, columns=self.xy+['Y_true','Est','Residual'])
out_df.to_csv(out_file, mode='a', header=False, index=False)
# Calculate scores and save as parameters to ForestLearn to output in "save_scores"
self.rmse[name] = math.sqrt(metrics.mean_squared_error(df[self.y_column], y_hat))
self.r2[name] = metrics.r2_score(df[self.y_column], y_hat)
self.avg_res[name] = np.mean(df[self.y_column] - y_hat)
self.avg_abs_res[name] = np.mean(abs(df[self.y_column] - y_hat))
def save_scores(self, out_file):
"""
Saves scores from predict_data
Inputs:
out_file (string): File name to save scores to
Returns: None
"""
# Create dictionary and save
dict_list = [self.rmse, self.r2, self.avg_res, self.avg_abs_res]
df = pd.DataFrame(dict_list)
df.insert(0, 'Scores', ['Root Mean Square Error','R-Squared','Average Residual','Average Absolute Residual'])
df.to_csv(out_file,index=False)
def predict_unsupervised_data(self, in_file, out_file, chunksize=500000, dropna=True):
"""
Uses trained model to predict over data from in_file and saves output to out_file
Inputs:
in_file (String): File name to load data from
out_file (string): File name to save data with predictions to
            chunksize (integer): Chunk size to read data in, this is helpful if the data is larger than available memory
dropna (boolean): Whether to remove records with any nan values. If set to False and NaN values are not resolved, this will cause an error.
Returns: None
"""
# Prepare output dataframe
out_df = pd.DataFrame(columns=self.xy+['Est'])
out_df.to_csv(out_file,index=False)
# Read in file using extension
if '.csv' in in_file:
chunks = pd.read_csv(in_file, chunksize=chunksize)
else:
chunks = pd.read_hdf(in_file, chunksize=chunksize)
# Loop over chunks
for df in chunks:
            # Remove records with NaN values if dropna is true
if dropna:
df = df.dropna()
# Predict data
y_hat = self.mdl.predict(df[self.predictors])
# Save results
dfY_hat = pd.Series(y_hat, name='Est').reset_index(drop=True)
dfCoords = df[self.xy].reset_index(drop=True)
            df0 = pd.concat([dfCoords, dfY_hat], axis=1)
out_df = pd.DataFrame(df0, columns=self.xy+['Est'])
out_df.to_csv(out_file, mode='a', header=False, index=False)
'''
The following methods are for instatiating model pipelines, which creates steps for numeric feature scaling, one hot encoding, and feature selection.
Learn more about the sci-kit learn model pipelines here: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
We have a number of different ones precoded to use
'''
def setup_rf_model(self):
'''
Sets up a random forest model with no feature selection or scaling
Returns:
self.mdl (Sk-learn Pipeline object)
'''
mdl1 = RandomForestRegressor(
n_estimators=500,
max_features="sqrt",
min_samples_split=5,
oob_score=True,
)
estimators = [
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_rf_model_scale(self):
'''
Sets up a random forest model with numeric feature scaling and one-hot-encoding
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[
('scale', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = RandomForestRegressor(
n_estimators=500,
max_features="sqrt",
min_samples_split=5,
oob_score=True,
)
estimators = [
('preprocess', preprocessor),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_rf_model_PCA(self):
'''
        Sets up a random forest model with numeric feature scaling, one-hot-encoding, and principal component analysis
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[
('scale', StandardScaler()),
('PCA', PCA(0.95))])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = RandomForestRegressor(
n_estimators=500,
max_features="sqrt",
min_samples_split=5,
oob_score=True,
)
estimators = [
('preprocess', preprocessor),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_rf_model_scale_SVR_FS(self):
'''
Sets up a random forest model with numeric feature scaling, one-hot-encoding, and support vector machine feature selection
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[
('scale', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = RandomForestRegressor(
n_estimators=500,
max_features="sqrt",
min_samples_split=5,
oob_score=True,
)
estimators = [
('preprocess', preprocessor),
('feature_selection', SelectFromModel(LinearSVR())),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_rf_model_scale_RF_FS(self):
'''
Sets up a random forest model with numeric feature scaling, one-hot-encoding, and random forest model feature selection
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[
('scale', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = RandomForestRegressor(
n_estimators=500,
max_features="sqrt",
min_samples_split=5,
oob_score=True,
)
estimators = [
('preprocess', preprocessor),
('feature_selection', SelectFromModel(RandomForestRegressor(n_estimators=100))),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_xgb_model(self):
'''
        Sets up an XGBoost model
Returns:
self.mdl (Sk-learn Pipeline object)
'''
mdl1 = xgb.XGBRegressor(
learning_rate=0.1,
n_estimators=50,
objective='reg:squarederror',
eval_metric='rmse',
nthread=-1)
estimators = [
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_xgb_model_scale(self):
'''
        Sets up an XGBoost model with numeric feature scaling and one-hot-encoding
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[('scale', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = xgb.XGBRegressor(
learning_rate=0.1,
n_estimators=50,
objective='reg:squarederror',
eval_metric='rmse',
nthread=-1)
estimators = [
('preprocess', preprocessor),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_xgb_model_PCA(self):
'''
        Sets up an XGBoost model with numeric feature scaling, one-hot-encoding, and principal component analysis
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[
('scale', StandardScaler()),
('PCA', PCA(0.95))
])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = xgb.XGBRegressor(
learning_rate=0.1,
n_estimators=50,
objective='reg:squarederror',
eval_metric='rmse',
nthread=-1)
estimators = [
('preprocess', preprocessor),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
def setup_xgb_model_RF_FS(self):
'''
        Sets up an XGBoost model with numeric feature scaling, one-hot-encoding, and random forest feature selection
Returns:
self.mdl (Sk-learn Pipeline object)
'''
numeric_transformer = Pipeline(steps=[('scale', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, self.numeric_features),
('cat', categorical_transformer, self.one_hot_features)],remainder='passthrough')
mdl1 = xgb.XGBRegressor(
learning_rate=0.1,
n_estimators=50,
objective='reg:squarederror',
eval_metric='rmse',
nthread=-1)
estimators = [
('preprocess', preprocessor),
('feature_selection', SelectFromModel(RandomForestRegressor(n_estimators=100))),
('learn', mdl1)
]
self.mdl = Pipeline(estimators)
return self.mdl
| 41.611557 | 160 | 0.627951 |
79426e7e57840738d65686499b55b8e7f573e7e6 | 1,573 | py | Python | src-django/api/migrations/0026_auto_20180313_2240.py | m-socha/sana.protocol_builder | 7b054bbab5ed981bd7bbc357e9657024f3e380e7 | [
"BSD-3-Clause"
] | 6 | 2015-06-05T22:41:10.000Z | 2017-09-06T07:08:09.000Z | src-django/api/migrations/0026_auto_20180313_2240.py | SanaMobile/sana.protocol_builder | e7e784797bf7b3a3060329f033fca5f411ebcc97 | [
"BSD-3-Clause"
] | 406 | 2015-01-11T05:50:07.000Z | 2018-01-05T23:01:04.000Z | src-django/api/migrations/0026_auto_20180313_2240.py | m-socha/sana.protocol_builder | 7b054bbab5ed981bd7bbc357e9657024f3e380e7 | [
"BSD-3-Clause"
] | 9 | 2015-10-12T23:39:31.000Z | 2018-07-11T20:59:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2018-03-13 22:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0025_merge'),
]
operations = [
migrations.CreateModel(
name='Subroutine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=255)),
('display_name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
],
options={
'ordering': ['last_modified'],
},
),
migrations.AlterField(
model_name='abstractelement',
name='concept',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='abstractelement', to='api.Concept'),
),
migrations.AddField(
model_name='abstractelement',
name='subroutine',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='abstractelements', to='api.Subroutine'),
),
]
| 36.581395 | 146 | 0.599491 |
79426ef5e49711f4547366c45ab2ad709983f8d1 | 2,687 | py | Python | workflow/plot_all_cartels.py | calltri/journal-citation-cartels | 2c7967eccb4783f136f8da47d417a0fe9e625c2a | [
"BSD-2-Clause"
] | null | null | null | workflow/plot_all_cartels.py | calltri/journal-citation-cartels | 2c7967eccb4783f136f8da47d417a0fe9e625c2a | [
"BSD-2-Clause"
] | null | null | null | workflow/plot_all_cartels.py | calltri/journal-citation-cartels | 2c7967eccb4783f136f8da47d417a0fe9e625c2a | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import pandas as pd
import utils
from scipy import sparse
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import matplotlib.colors as colors
from matplotlib import cm
import os
sys.path.append(os.path.abspath(os.path.join("libs/cidre")))
from cidre import cidre, filters, draw
def load_valid_cartel(year, cartel_dir):
"""Loads a cartel for a given year, returning an empty dataframe if none are there"""
fileName = "{root}/cartels-{year}.csv".format(root=cartel_dir, year=year)
if not os.path.exists(fileName):
return pd.DataFrame()
cartel_table = pd.read_csv(
fileName, sep="\t"
)
cartel_table["year"] = year
return cartel_table
def get_affiliation_name(graph, id):
"""Given database and id returns the display name"""
    # Query the affiliation's normalized display name
query = """
MATCH (a:Affiliations)
WHERE a.AffiliationId="%d"
return a.NormalizedName
""" % (
id,
)
df = graph.run(query).data()
df = df[0]['a.NormalizedName']
print("{id}: {name}".format(id=id, name=df))
return df
# python workflow/plot-all-cartels.py data/cartels data/figs data/networks 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018
if __name__ == '__main__':
CARTEL_DIR = sys.argv[1]
PLOT_DIR = sys.argv[2]
NETWORK_DIR = sys.argv[3]
YEARS = [int(y) for y in sys.argv[4:]]
theta = 0.15
graph = utils.get_db()
# For each year make plots
for year in YEARS:
citation_group_table = load_valid_cartel(year, CARTEL_DIR)
# Skip years that have nothing
if citation_group_table.empty:
continue
W, A_gen, nodes = utils.load_network(year, NETWORK_DIR)
# Load the class for drawing a cartel
dc = draw.DrawCartel()
# Set up the canvas
fig, axes = plt.subplots(figsize=(10,10))
sns.set_style("white")
sns.set(font_scale = 1.2)
sns.set_style("ticks")
# Set the name of each node
citation_group_table["name"] = citation_group_table["mag_affiliation_id"].apply(lambda x : str(get_affiliation_name(graph, x)))
for cid, cartel in citation_group_table.groupby("group_id"):
dc.draw(
W,
cartel.node_id.values.tolist(),
cartel.donor_score.values.tolist(),
cartel.recipient_score.values.tolist(),
theta,
cartel.name.values.tolist(),
ax=axes,
)
plt.savefig("{root}/{year}-{cid}-cartel-plots.png".format(root=PLOT_DIR, year=year, cid=cid))
| 31.244186 | 169 | 0.633048 |
7942721871d37e7572cda011766f4c262ee7123f | 2,077 | py | Python | resources/fred/evaluator.py | zeroshot-ts/zeroshot-ts | 177039565c3729dec0c25e8830c366c069b32ead | [
"Apache-2.0"
] | null | null | null | resources/fred/evaluator.py | zeroshot-ts/zeroshot-ts | 177039565c3729dec0c25e8830c366c069b32ead | [
"Apache-2.0"
] | null | null | null | resources/fred/evaluator.py | zeroshot-ts/zeroshot-ts | 177039565c3729dec0c25e8830c366c069b32ead | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
from collections import OrderedDict
from dataclasses import dataclass
import numpy as np
from common.evaluator import Evaluator, EvaluationResult
from common.metrics import smape_2
from common.timeseries import TimeseriesBundle
from common.utils import round_half_up
from resources.fred.dataset import FredDataset, FredMeta
@dataclass
class FredEvaluator(Evaluator):
validation: bool = False
def evaluate(self, forecast: TimeseriesBundle) -> EvaluationResult:
insamples, _ = FredDataset(FredMeta.dataset_path).standard_split()
if self.validation:
horizons_map = FredMeta().horizons_map()
insamples, _ = insamples.split(lambda ts: ts.split(-horizons_map[ts.meta['seasonal_pattern']]))
grouped_smapes = {sp: np.mean(smape_2(forecast=np.array(self.filter_by_sp(forecast, sp).values()),
target=np.array(self.filter_by_sp(self.test_set, sp).values())))
for sp in FredMeta.seasonal_patterns}
grouped_smapes = self.summarize_groups(grouped_smapes)
return self.round_values(grouped_smapes)
def summarize_groups(self, scores):
scores_summary = OrderedDict()
weighted_score = {}
for sp in ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily']:
weighted_score[sp] = scores[sp] * len(self.filter_by_sp(self.test_set, sp).timeseries)
scores_summary[sp] = scores[sp]
average = np.sum(list(weighted_score.values())) / len(self.test_set.timeseries)
scores_summary['Average'] = average
return scores_summary
@staticmethod
def filter_by_sp(bundle: TimeseriesBundle, seasonal_pattern: str) -> TimeseriesBundle:
return bundle.filter(lambda ts: ts.meta['seasonal_pattern'] == seasonal_pattern)
@staticmethod
def round_values(scores: OrderedDict):
rounded_scores = OrderedDict()
for k, v in scores.items():
rounded_scores[k] = round_half_up(v, 3)
return rounded_scores
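# Hedged usage sketch (assumes the Evaluator base class carries the target bundle, as the
# self.test_set references above suggest; the bundle names are illustrative only):
#     evaluator = FredEvaluator(test_set=target_bundle, validation=False)
#     scores = evaluator.evaluate(forecast_bundle)
#     # -> OrderedDict of per-seasonal-pattern sMAPE values plus a weighted 'Average' entry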
| 37.763636 | 110 | 0.690419 |
79427296b2f1b12c30bfc7fd806b65648fe061d2 | 22,792 | py | Python | src/genie/libs/parser/iosxe/show_lldp.py | ngrundler/genieparser | a78dfde72042744d4ea559f87cb014821d9a305a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/show_lldp.py | ngrundler/genieparser | a78dfde72042744d4ea559f87cb014821d9a305a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/show_lldp.py | ngrundler/genieparser | a78dfde72042744d4ea559f87cb014821d9a305a | [
"Apache-2.0"
] | null | null | null | """show_lldp.py
supported commands:
* show lldp
* show lldp entry *
* show lldp entry [<WORD>]
* show lldp interface [<WORD>]
* show lldp neighbors detail
* show lldp traffic
"""
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
class ShowLldpSchema(MetaParser):
"""Schema for show lldp"""
schema = {
'status': str,
'enabled': bool,
'hello_timer': int,
'hold_timer': int,
'reinit_timer': int
}
class ShowLldp(ShowLldpSchema):
"""Parser for show lldp"""
cli_command = 'show lldp'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^Status: +(?P<status>\w+)$')
p2 = re.compile(r'^LLDP +(?P<pattern>[\w\s]+) +(?P<value>\d+) +seconds$')
for line in out.splitlines():
line = line.strip()
# Status: ACTIVE
m = p1.match(line)
if m:
status = m.groupdict()['status'].lower()
ret_dict['status'] = status
ret_dict['enabled'] = True if 'active' in status else False
continue
# LLDP advertisements are sent every 30 seconds
# LLDP hold time advertised is 120 seconds
# LLDP interface reinitialisation delay is 2 seconds
m = p2.match(line)
if m:
group = m.groupdict()
if re.search('(advertisements +are +sent +every)', group['pattern']):
key = 'hello_timer'
elif re.search('(hold +time +advertised +is)', group['pattern']):
key = 'hold_timer'
elif re.search('(interface +reinitialisation +delay +is)', group['pattern']):
key = 'reinit_timer'
else:
continue
ret_dict[key] = int(group['value'])
continue
return ret_dict
class ShowLldpEntrySchema(MetaParser):
"""Schema for show lldp entry [<WORD>|*]"""
schema = {
'total_entries': int,
Optional('interfaces'): {
Any(): {
'if_name': str,
'port_id': {
Any(): {
'neighbors': {
Any(): {
'chassis_id': str,
'port_id': str,
'neighbor_id': str,
Optional('port_description'): str,
Optional('system_description'): str,
Optional('system_name'): str,
'time_remaining': int,
Optional('capabilities'): {
Any():{
Optional('system'): bool,
Optional('enabled'): bool,
'name': str,
}
},
Optional('management_address'): str,
Optional('auto_negotiation'): str,
Optional('physical_media_capabilities'): list,
Optional('unit_type'): int,
Optional('vlan_id'): int,
}
}
}
}
}
},
Optional('med_information'): {
'f/w_revision': str,
Optional('h/w_revision'): str,
Optional('s/w_revision'): str,
'manufacturer': str,
'model': str,
'capabilities': list,
'device_type': str,
'network_policy': {
Any(): { # 'voice'; 'voice_signal'
'vlan': int, # 110
'tagged': bool,
'layer_2_priority': int,
'dscp': int,
},
},
Optional('serial_number'): str,
'power_source': str,
'power_priority': str,
'wattage': float,
'location': str,
}
}
class ShowLldpEntry(ShowLldpEntrySchema):
"""Parser for show lldp entry {* | word}"""
CAPABILITY_CODES = {'R': 'router',
'B': 'mac_bridge',
'T': 'telephone',
'C': 'docsis_cable_device',
'W': 'wlan_access_point',
'P': 'repeater',
'S': 'station_only',
'O': 'other'}
cli_command = ['show lldp entry {entry}', 'show lldp entry *']
def cli(self, entry='',output=None):
if output is None:
if entry:
cmd = self.cli_command[0].format(entry=entry)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
item = ''
sub_dict = {}
# ==== initial regexp pattern ====
# Local Intf: Gi2/0/15
p1 = re.compile(r'^Local\s+Intf:\s+(?P<intf>[\w\/\.\-]+)$')
# Port id: Gi1/0/4
p1_1 = re.compile(r'^Port\s+id:\s+(?P<port_id>[\S\s]+)$')
# Chassis id: 843d.c638.b980
p2 = re.compile(r'^Chassis\s+id:\s+(?P<chassis_id>[\w\.]+)$')
# Port Description: GigabitEthernet1/0/4
p3 = re.compile(r'^Port\s+Description:\s+(?P<desc>[\w\/\.\-\s]+)$')
# System Name: R5
# System Name - not advertised
p4 = re.compile(r'^System\s+Name(?: +-|:)\s+(?P<name>[\S\s]+)$')
# System Description:
p5 = re.compile(r'^System\s+Description:.*$')
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2011 by Cisco Systems, Inc.
# Cisco IP Phone 7962G,V12, SCCP42.9-3-1ES27S
p5_1 = re.compile(r'^(?P<msg>(Cisco +IOS +Software|Technical Support|Copyright|Cisco IP Phone).*)$')
# Compiled Thu 21-Jul-11 01:23 by prod_rel_team
# Avaya 1220 IP Deskphone, Firmware:06Q
# IP Phone, Firmware:90234AP
p5_2 = re.compile(r'^(?P<msg>(Compile|Avaya|IP Phone).*)$')
# Time remaining: 112 seconds
p6 = re.compile(r'^Time\s+remaining:\s+(?P<time_remaining>\w+)\s+seconds$')
# System Capabilities: B,R
p7 = re.compile(r'^System\s+Capabilities:\s+(?P<capab>[\w\,\s]+)$')
# Enabled Capabilities: B,R
p8 = re.compile(r'^Enabled\s+Capabilities:\s+(?P<capab>[\w\,\s]+)$')
# Management Addresses:
# IP: 10.9.1.1
# Management Addresses - not advertised
p9 = re.compile(r'^IP:\s+(?P<ip>[\w\.]+)$')
p9_1 = re.compile(r'^Management\s+Addresses\s+-\s+(?P<ip>not\sadvertised)$')
# Auto Negotiation - supported, enabled
p10 = re.compile(r'^Auto\s+Negotiation\s+\-\s+(?P<auto_negotiation>[\w\s\,]+)$')
# Physical media capabilities:
p11 = re.compile(r'^Physical\s+media\s+capabilities:$')
# 1000baseT(FD)
# 100base-TX(HD)
# Symm, Asym Pause(FD)
# Symm Pause(FD)
p11_1 = re.compile(r'^(?P<physical_media_capabilities>[\S\(\s]+(HD|FD)[\)])$')
# Media Attachment Unit type: 30
p12 = re.compile(r'^Media\s+Attachment\s+Unit\s+type:\s+(?P<unit_type>\d+)$')
# Vlan ID: 1
# Note: 'not advertised' is not parsed here since the value type is int
p13 = re.compile(r'^Vlan\s+ID:\s+(?P<vlan_id>\d+)$')
# Total entries displayed: 4
p14 = re.compile(r'^Total\s+entries\s+displayed:\s+(?P<entry>\d+)$')
# ==== MED Information patterns =====
# F/W revision: 06Q
# S/W revision: SCCP42.9-3-1ES27S
# H/W revision: 12
med_p1 = re.compile(r'^(?P<head>(H/W|F/W|S/W))\s+revision:\s+(?P<revision>\S+)$')
# Manufacturer: Avaya-05
med_p2 = re.compile(r'^Manufacturer:\s+(?P<manufacturer>[\S\s]+)$')
# Model: 1220 IP Deskphone
med_p3 = re.compile(r'^Model:\s+(?P<model>[\S\s]+)$')
# Capabilities: NP, LI, PD, IN
med_p4 = re.compile(r'^Capabilities:\s+(?P<capabilities>[\S\s]+)$')
# Device type: Endpoint Class III
med_p5 = re.compile(r'^Device\s+type:\s+(?P<device_type>[\S\s]+)$')
# Network Policy(Voice): VLAN 110, tagged, Layer-2 priority: 5, DSCP: 46
# Network Policy(Voice Signal): VLAN 110, tagged, Layer-2 priority: 0, DSCP: 0
med_p6 = re.compile(r'^Network\s+Policy\(Voice(\s+(?P<voice_signal>Signal))?\):'
r'\s+VLAN\s+(?P<vlan>\d+),\s+(?P<tagged>tagged),\s+'
r'Layer-2 priority:\s+(?P<layer_2_priority>\d+),\s+DSCP:\s+(?P<dscp>\d+)$')
# PD device, Power source: Unknown, Power Priority: High, Wattage: 6.0
med_p7 = re.compile(r'^(?P<device_type>PD device),\s+Power\s+source:\s+(?P<power_source>\S+),\s+'
r'Power\s+Priority:\s+(?P<power_priority>\S+),\s+Wattage:\s+(?P<wattage>\S+)$')
# Location - not advertised
med_p8 = re.compile(r'^Location\s+-\s+(?P<location>[\S\s]+)$')
# Serial number: FCH1610A5S5
med_p9 = re.compile(r'^Serial\s+number:\s+(?P<serial_number>\S+)$')
for line in out.splitlines():
line = line.strip()
# Local Intf: Gi2/0/15
m = p1.match(line)
if m:
intf = Common.convert_intf_name(m.groupdict()['intf'])
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault(intf, {})
intf_dict['if_name'] = intf
sub_dict = {}
continue
# Chassis id: 843d.c638.b980
m = p2.match(line)
if m:
sub_dict = {}
chassis_id = m.groupdict()['chassis_id']
sub_dict.setdefault('chassis_id', chassis_id)
continue
# Port id: Gi1/0/4
m = p1_1.match(line)
if m:
if 'interfaces' not in ret_dict:
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault('N/A', {})
intf_dict['if_name'] = 'N/A'
port_id = Common.convert_intf_name(m.groupdict()['port_id'])
port_dict = intf_dict.setdefault('port_id', {}). \
setdefault(port_id, {})
sub_dict.setdefault('port_id', port_id)
continue
# Port Description: GigabitEthernet1/0/4
m = p3.match(line)
if m:
sub_dict.setdefault('port_description', m.groupdict()['desc'])
continue
# System Name: R5
# System Name - not advertised
m = p4.match(line)
if m:
name = m.groupdict()['name']
nei_dict = port_dict.setdefault('neighbors', {}).setdefault(name, {})
sub_dict['system_name'] = name
nei_dict['neighbor_id'] = name
nei_dict.update(sub_dict)
continue
# System Description:
m = p5.match(line)
if m:
nei_dict.update({'system_description': ''})
continue
# Cisco IOS Software, C3750E Software (C3750E-UNIVERSALK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2011 by Cisco Systems, Inc.
# Cisco IP Phone 7962G,V12, SCCP42.9-3-1ES27S
m = p5_1.match(line)
if m:
nei_dict['system_description'] += m.groupdict()['msg'] + '\n'
continue
# Compiled Thu 21-Jul-11 01:23 by prod_rel_team
# Avaya 1220 IP Deskphone, Firmware:06Q
# IP Phone, Firmware:90234AP
m = p5_2.match(line)
if m:
nei_dict['system_description'] += m.groupdict()['msg']
continue
# Time remaining: 112 seconds
m = p6.match(line)
if m:
nei_dict['time_remaining'] = int(m.groupdict()['time_remaining'])
continue
# System Capabilities: B,R
m = p7.match(line)
if m:
cap = [self.CAPABILITY_CODES[n] for n in m.groupdict()['capab'].split(',')]
for item in cap:
cap_dict = nei_dict.setdefault('capabilities', {}).\
setdefault(item, {})
cap_dict['name'] = item
cap_dict['system'] = True
continue
# Enabled Capabilities: B,R
m = p8.match(line)
if m:
cap = [self.CAPABILITY_CODES[n] for n in m.groupdict()['capab'].split(',')]
for item in cap:
cap_dict = nei_dict.setdefault('capabilities', {}).\
setdefault(item, {})
cap_dict['name'] = item
cap_dict['enabled'] = True
continue
# Management Addresses:
# IP: 10.9.1.1
# Management Addresses - not advertised
m = p9.match(line) or p9_1.match(line)
if m:
nei_dict['management_address'] = m.groupdict()['ip']
continue
# Auto Negotiation - supported, enabled
m = p10.match(line)
if m:
nei_dict['auto_negotiation'] = m.groupdict()['auto_negotiation']
continue
# Physical media capabilities:
m = p11.match(line)
if m:
nei_dict['physical_media_capabilities'] = []
continue
# 1000baseT(FD)
# 100base-TX(HD)
# Symm, Asym Pause(FD)
# Symm Pause(FD)
m = p11_1.match(line)
if m:
item = nei_dict.get('physical_media_capabilities', [])
item.append(m.groupdict()['physical_media_capabilities'])
nei_dict['physical_media_capabilities'] = item
continue
# Media Attachment Unit type: 30
m = p12.match(line)
if m:
nei_dict['unit_type'] = int(m.groupdict()['unit_type'])
continue
# Vlan ID: 1
# Note: 'not advertised' is not parsed here since the value type is int
m = p13.match(line)
if m:
nei_dict['vlan_id'] = int(m.groupdict()['vlan_id'])
continue
# Total entries displayed: 4
m = p14.match(line)
if m:
ret_dict['total_entries'] = int(m.groupdict()['entry'])
continue
# ==== Med Information ====
# F/W revision: 06Q
# S/W revision: SCCP42.9-3-1ES27S
# H/W revision: 12
m = med_p1.match(line)
if m:
group = m.groupdict()
med_dict = ret_dict.setdefault('med_information', {})
med_dict[group['head'].lower()+'_revision'] = m.groupdict()['revision']
continue
# Manufacturer: Avaya-05
# Model: 1220 IP Deskphone
# Device type: Endpoint Class III
m = med_p2.match(line) or med_p3.match(line) or med_p5.match(line)
if m:
match_key = [*m.groupdict().keys()][0]
med_dict[match_key] = m.groupdict()[match_key]
continue
# Capabilities: NP, LI, PD, IN
m = med_p4.match(line)
if m:
list_capabilities = m.groupdict()['capabilities'].split(', ')
med_dict['capabilities'] = list_capabilities
continue
# Network Policy(Voice): VLAN 110, tagged, Layer-2 priority: 5, DSCP: 46
# Network Policy(Voice Signal): VLAN 110, tagged, Layer-2 priority: 0, DSCP: 0
m = med_p6.match(line)
if m:
group = m.groupdict()
if group['voice_signal']:
voice = 'voice_signal'
else:
voice = 'voice'
voice_sub_dict = med_dict.setdefault('network_policy', {}).\
setdefault(voice, {})
if group['tagged'] == 'tagged':
voice_sub_dict['tagged'] = True
else:
voice_sub_dict['tagged'] = False
for k in ['layer_2_priority', 'dscp', 'vlan']:
voice_sub_dict[k] = int(group[k])
continue
# PD device, Power source: Unknown, Power Priority: High, Wattage: 6.0
m = med_p7.match(line)
if m:
for k in ['device_type', 'power_source', 'power_priority']:
med_dict[k] = m.groupdict()[k]
med_dict['wattage'] = float(m.groupdict()['wattage'])
continue
# Location - not advertised
m = med_p8.match(line)
if m:
med_dict['location'] = m.groupdict()['location']
continue
# Serial number: FCH1610A5S5
m = med_p9.match(line)
if m:
med_dict['serial_number']: m.groupdict()['serial_number']
continue
return ret_dict
class ShowLldpNeighborsDetail(ShowLldpEntry):
'''Parser for show lldp neighbors detail'''
cli_command = 'show lldp neighbors detail'
exclude = ['time_remaining']
def cli(self,output=None):
if output is None:
show_output = self.device.execute(self.cli_command)
else:
show_output = output
return super().cli(output=show_output)
class ShowLldpTrafficSchema(MetaParser):
"""Schema for show lldp traffic"""
schema = {
"frame_in": int,
"frame_out": int,
"frame_error_in": int,
"frame_discard": int,
"tlv_discard": int,
'tlv_unknown': int,
'entries_aged_out': int
}
class ShowLldpTraffic(ShowLldpTrafficSchema):
"""Parser for show lldp traffic"""
cli_command = 'show lldp traffic'
exclude = ['frame_in' , 'frame_out', 'tlv_discard', 'tlv_unknown']
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^(?P<pattern>[\w\s]+): +(?P<value>\d+)$')
for line in out.splitlines():
line = line.strip()
# Total frames out: 20372
# Total entries aged: 34
# Total frames in: 13315
# Total frames received in error: 0
# Total frames discarded: 14
# Total TLVs discarded: 0
# Total TLVs unrecognized: 0
m = p1.match(line)
if m:
group = m.groupdict()
if re.search('(Total +frames +out)', group['pattern']):
key = 'frame_out'
elif re.search('(Total +entries +aged)', group['pattern']):
key = 'entries_aged_out'
elif re.search('(Total +frames +in)', group['pattern']):
key = 'frame_in'
elif re.search('(Total +frames +received +in +error)', group['pattern']):
key = 'frame_error_in'
elif re.search('(Total +frames +discarded)', group['pattern']):
key = 'frame_discard'
elif re.search('(Total +TLVs +discarded)', group['pattern']):
key = 'tlv_discard'
elif re.search('(Total +TLVs +unrecognized)', group['pattern']):
key = 'tlv_unknown'
else:
continue
ret_dict[key] = int(group['value'])
continue
return ret_dict
class ShowLldpInterfaceSchema(MetaParser):
"""Schema for show lldp interface [<WORD>]"""
schema = {
'interfaces': {
Any(): {
'tx': str,
'rx': str,
'tx_state': str,
'rx_state': str,
},
}
}
class ShowLldpInterface(ShowLldpInterfaceSchema):
"""Parser for show lldp interface [<WORD>]"""
cli_command = ['show lldp interface {interface}','show lldp interface']
def cli(self, interface='',output=None):
if output is None:
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^(?P<intf>[\w\/\-\.]+):$')
p2 = re.compile(r'^(?P<key>[\w\s]+): +(?P<value>[\w\s]+)$')
for line in out.splitlines():
line = line.strip()
# GigabitEthernet1/0/15
m = p1.match(line)
if m:
intf_dict = ret_dict.setdefault('interfaces', {}).\
setdefault(m.groupdict()['intf'], {})
continue
# Tx: enabled
# Rx: enabled
# Tx state: IDLE
# Rx state: WAIT FOR FRAME
m = p2.match(line)
if m:
group = m.groupdict()
key = '_'.join(group['key'].lower().split())
intf_dict[key] = group['value'].lower()
continue
return ret_dict
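# Hedged usage sketch (standard pyATS/Genie invocation; the connected ``device`` object and its
# raw CLI output are assumptions, not part of this module):
#     lldp = ShowLldp(device=device).parse()              # or: device.parse('show lldp')
#     entries = ShowLldpEntry(device=device).parse(entry='*')
#     entries['total_entries']                            # per ShowLldpEntrySchema above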
| 36.063291 | 117 | 0.478282 |
794273ed9dcb4fbaa257e3b10cd126bda79258d9 | 935 | py | Python | datastructure/practice/c7/c_7_37.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | datastructure/practice/c7/c_7_37.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | datastructure/practice/c7/c_7_37.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | import unittest
from datastructure.links.PositionList import PositionList
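# has_sum_equals below is a two-sum over the position list: for each element e it records the
# needed complement V - e -> its position; when a later element is already a key, the stored
# position and the current one sum to V.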
def has_sum_equals(position_list, V):
if position_list.is_empty():
return None
sum_dict = {}
current = position_list.first()
while current is not None:
if current.element() in sum_dict:
return sum_dict[current.element()], current
sum_dict[V - current.element()] = current
current = position_list.after(current)
return None
class MyTestCase(unittest.TestCase):
def test_something(self):
position_list = PositionList()
position_list.add_last(1)
p = position_list.add_last(3)
position_list.add_last(4)
q = position_list.add_last(7)
position_list.add_last(5)
self.assertEqual(None, has_sum_equals(position_list, 22))
self.assertEqual((p, q), has_sum_equals(position_list, 10))
if __name__ == '__main__':
unittest.main()
| 26.714286 | 67 | 0.673797 |
794275121810a83919f3894fcb094ff16cfdf2e1 | 1,074 | bzl | Python | nisaba/scripts/brahmic/constant.bzl | google-research/nisaba | 41cfe4390b421ecfb26b351a6b36d85dfb7ba153 | [
"Apache-2.0"
] | 16 | 2020-12-03T22:45:18.000Z | 2022-01-21T07:28:35.000Z | nisaba/scripts/brahmic/constant.bzl | google-research/nisaba | 41cfe4390b421ecfb26b351a6b36d85dfb7ba153 | [
"Apache-2.0"
] | 9 | 2021-02-19T19:50:21.000Z | 2021-09-01T21:06:26.000Z | nisaba/scripts/brahmic/constant.bzl | google-research/nisaba | 41cfe4390b421ecfb26b351a6b36d85dfb7ba153 | [
"Apache-2.0"
] | 2 | 2021-11-05T14:49:41.000Z | 2022-01-29T18:27:54.000Z | # Copyright 2021 Nisaba Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlark constants for Brahmic targets."""
# Script and language codes are used as per IANA registry:
# https://www.iana.org/assignments/language-subtag-registry
SCRIPTS = [
"Beng",
"Bugi",
"Deva",
"Gujr",
"Guru",
"Knda",
"Lepc",
"Limb",
"Mlym",
"Mtei",
"Newa",
"Orya",
"Sinh",
"Sylo",
"Takr",
"Taml",
"Telu",
"Tglg",
"Thaa",
"Tirh",
]
LANG_DIRS = {
"Beng": ["as", "bn"],
}
FIXED_RULE_SCRIPTS = ["Mlym"]
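# Hedged illustration (not from this repository): these constants are meant to be consumed from
# other .bzl/BUILD files, e.g. with a hypothetical rule name:
#     load("//nisaba/scripts/brahmic:constant.bzl", "SCRIPTS")
#     [some_rule(name = script.lower()) for script in SCRIPTS]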
| 22.851064 | 74 | 0.648045 |
794275be32255d7dc0de4facc367bd4115a900ed | 3,074 | py | Python | recipes/Python/578203_pygmail_can_send_mail/recipe-578203.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/578203_pygmail_can_send_mail/recipe-578203.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/578203_pygmail_can_send_mail/recipe-578203.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 20:57:29 2012
@author: garrett
@email: [email protected]
original pygmail from:
https://github.com/vinod85/pygmail/blob/master/pygmail.py
"""
import imaplib, smtplib
import re
from email.mime.text import MIMEText
class pygmail(object):
IMAP_SERVER='imap.gmail.com'
IMAP_PORT=993
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT=465
def __init__(self):
self.M = None
self.response = None
self.mailboxes = []
def login(self, username, password):
self.M = imaplib.IMAP4_SSL(self.IMAP_SERVER, self.IMAP_PORT)
self.S = smtplib.SMTP_SSL(self.SMTP_SERVER, self.SMTP_PORT)
rc, self.response = self.M.login(username, password)
sc, self.response_s = self.S.login(username, password)
self.username = username
return rc, sc
def send_mail(self, to_addrs, msg, subject = None):
msg = MIMEText(msg)
if subject != None:
msg['Subject'] = subject
msg['From'] = self.username
msg['To'] = to_addrs
return self.S.sendmail(self.username, to_addrs, msg.as_string())
def get_mailboxes(self):
rc, self.response = self.M.list()
for item in self.response:
self.mailboxes.append(item.split()[-1])
return rc
def get_mail_count(self, folder='Inbox'):
rc, self.response = self.M.select(folder)
return self.response[0]
def get_unread_count(self, folder='Inbox'):
rc, self.response = self.M.status(folder, "(UNSEEN)")
unreadCount = re.search("UNSEEN (\d+)", self.response[0]).group(1)
return unreadCount
def get_imap_quota(self):
quotaStr = self.M.getquotaroot("Inbox")[1][1][0]
r = re.compile('\d+').findall(quotaStr)
if r == []:
r.append(0)
r.append(0)
return float(r[1])/1024, float(r[0])/1024
def get_mails_from(self, uid, folder='Inbox'):
status, count = self.M.select(folder, readonly=1)
status, response = self.M.search(None, 'FROM', uid)
email_ids = [e_id for e_id in response[0].split()]
return email_ids
def get_mail_from_id(self, id):
status, response = self.M.fetch(id, '(body[header.fields (subject)])')
return response
def rename_mailbox(self, oldmailbox, newmailbox):
rc, self.response = self.M.rename(oldmailbox, newmailbox)
return rc
def create_mailbox(self, mailbox):
rc, self.response = self.M.create(mailbox)
return rc
def delete_mailbox(self, mailbox):
rc, self.response = self.M.delete(mailbox)
return rc
def logout(self):
self.M.logout()
self.S.quit()
if __name__ == '__main__':
user = '[email protected]'
pwd = 'govegan4life'
gm = pygmail()
gm.login(user, pwd)
send_to = '[email protected]'
msg = 'Hi there, have you ever thought about the suffering of animals? Go vegan!'
gm.send_mail(send_to, msg, 'peace')
| 29.84466 | 85 | 0.614509 |
79427603c2774214d29eb900cd10e1c722b9f4fd | 1,627 | py | Python | mli/server.py | m-pilia/mli | 2e351cd98dbc9e689e252b823bb2ba63ac028716 | [
"MIT"
] | 1 | 2020-04-14T13:05:42.000Z | 2020-04-14T13:05:42.000Z | mli/server.py | m-pilia/mli | 2e351cd98dbc9e689e252b823bb2ba63ac028716 | [
"MIT"
] | 1 | 2020-04-14T19:08:22.000Z | 2020-04-14T19:08:22.000Z | mli/server.py | m-pilia/mli | 2e351cd98dbc9e689e252b823bb2ba63ac028716 | [
"MIT"
] | null | null | null | import json
import logging
import socket
import sys
import threading
import matlab.engine
from .mlsession import MLSession
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
class _ML_JSON_Encoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, matlab.engine.FutureResult):
return 'FutureResult'
return super(_ML_JSON_Encoder, self).default(o)
def _serve_client(connection, matlab_session):
try:
with connection:
data = bytes.decode(connection.recv(4096))
logger.info('Serving request: %s', data)
try:
data = json.loads(data)
ret = matlab_session.do(data['action'], *data['args'], **data['kwargs'])
connection.sendall(json.dumps(ret, cls=_ML_JSON_Encoder).encode())
except json.decoder.JSONDecodeError:
msg = sys.exc_info()
logger.error(msg)
connection.sendall(json.dumps({'ret': None, 'out': str(msg)}).encode())
except ConnectionAbortedError:
return
def main(args):
logger.setLevel(args.log_level)
ml = MLSession(session_name=args.session_name)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(3.0)
s.bind((args.host, args.port))
s.listen()
logger.info('Waiting for incoming connections...')
while True:
try:
conn, _ = s.accept()
threading.Thread(target=_serve_client, args=(conn, ml)).start()
except socket.timeout:
continue
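# Hedged client-side sketch of the JSON protocol handled by _serve_client above
# (the host/port and the 'action' name are assumptions, not part of this module):
#     import json, socket
#     with socket.create_connection(("127.0.0.1", 2345)) as sock:
#         sock.sendall(json.dumps({"action": "eval", "args": ["1+1"], "kwargs": {}}).encode())
#         reply = json.loads(sock.recv(4096))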
| 29.581818 | 88 | 0.617087 |
794276ed9c52c42667f5f048710c55d9de9dfd0a | 3,789 | py | Python | src/app/tests/mailchimp/tests_mailchimp_http.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 151 | 2020-04-21T09:58:57.000Z | 2021-09-12T09:01:21.000Z | src/app/tests/mailchimp/tests_mailchimp_http.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 163 | 2020-05-29T20:52:00.000Z | 2021-09-11T12:44:56.000Z | src/app/tests/mailchimp/tests_mailchimp_http.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | 39 | 2020-04-21T12:28:16.000Z | 2021-09-12T15:33:47.000Z | import pytest
from app.integrations.mailchimp.exceptions import MailchimpNotFound, MailchimpWrongResponse
pytestmark = [pytest.mark.django_db]
def test_get_ok(mailchimp):
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True})
assert mailchimp.http.get('test/endpoint') == {'ok': True}
def test_custom_status_code(mailchimp):
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True}, status_code=204)
assert mailchimp.http.request(url='test/endpoint', method='GET', expected_status_code=204) == {'ok': True}
def test_custom_status_code_fail(mailchimp):
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True}, status_code=200)
with pytest.raises(MailchimpWrongResponse):
mailchimp.http.request(method='GET', url='test/endpoint', expected_status_code=931)
def test_get_no_content(mailchimp):
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint')
assert mailchimp.http.get('test/endpoint') is None
def test_post_ok(mailchimp):
mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True})
assert mailchimp.http.post('test/endpoint', payload={}) == {'ok': True}
def test_post_no_content(mailchimp):
mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/test/endpoint')
assert mailchimp.http.post('test/endpoint', payload={}) is None
@pytest.mark.parametrize(('code', 'exception'), [
(504, MailchimpWrongResponse),
(404, MailchimpNotFound),
])
def test_get_wrong_status_codes(mailchimp, code, exception):
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True}, status_code=code)
with pytest.raises(exception):
mailchimp.http.get('test/endpoint')
@pytest.mark.parametrize(('code', 'exception'), [
(504, MailchimpWrongResponse),
(404, MailchimpNotFound),
])
def test_post_wrong_status_codes(mailchimp, code, exception):
mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/test/endpoint', json={'ok': True}, status_code=code)
with pytest.raises(exception):
mailchimp.http.post('test/endpoint', payload={})
def test_post_payload(mailchimp):
def assertion(request, context):
json = request.json()
assert json['__mocked'] == 'test'
return {'ok': True}
mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/test/endpoint', json=assertion)
mailchimp.http.post('test/endpoint', payload={
'__mocked': 'test',
})
@pytest.mark.xfail(strict=True, reason='Just to check above test works')
def test_post_payload_fail(mailchimp):
def assertion(request, context):
json = request.json()
assert json['__mocked'] == 'SHOULD NOT BE MOCKED'
return {'ok': True}
mailchimp.http_mock.post('https://us05.api.mailchimp.com/3.0/test/endpoint', json=assertion)
mailchimp.http.post('test/endpoint', payload={
'__mocked': 'test',
})
def test_authentication(mailchimp):
def assertion(request, context):
assert request.headers['Authorization'] == 'Basic dXNlcjprZXktdXMwNQ=='
return {'ok': True}
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json=assertion)
mailchimp.http.get('test/endpoint')
@pytest.mark.xfail(strict=True, reason='Just to check above test works')
def test_authentication_wr0ng(mailchimp):
def assertion(request, context):
assert request.headers['Authorization'] == 'UNKNOWN AUTH DO NOT WORK'
return {'ok': True}
mailchimp.http_mock.get('https://us05.api.mailchimp.com/3.0/test/endpoint', json=assertion)
mailchimp.http.get('test/endpoint')
| 31.840336 | 117 | 0.705199 |
79427742646a548f413048bbcf24e1c596ca5f84 | 2,108 | py | Python | vision/body_analysis/ultraface/demo.py | mbencer/models | fb8271d5d5d9b90dbb1eb5e8e40f8f580fb248b3 | [
"MIT"
] | 1 | 2021-01-22T03:24:38.000Z | 2021-01-22T03:24:38.000Z | vision/body_analysis/ultraface/demo.py | mbencer/models | fb8271d5d5d9b90dbb1eb5e8e40f8f580fb248b3 | [
"MIT"
] | null | null | null | vision/body_analysis/ultraface/demo.py | mbencer/models | fb8271d5d5d9b90dbb1eb5e8e40f8f580fb248b3 | [
"MIT"
] | null | null | null | import cv2
import onnxruntime as ort
import argparse
import numpy as np
from dependencies.box_utils import predict
# ------------------------------------------------------------------------------------------------------------------------------------------------
# Face detection using UltraFace-320 onnx model
face_detector_onnx = "../ultraface/models/version-RFB-320.onnx"
face_detector = ort.InferenceSession(face_detector_onnx)
# scale current rectangle to box
def scale(box):
width = box[2] - box[0]
height = box[3] - box[1]
maximum = max(width, height)
dx = int((maximum - width)/2)
dy = int((maximum - height)/2)
bboxes = [box[0] - dx, box[1] - dy, box[2] + dx, box[3] + dy]
return bboxes
# crop image
def cropImage(image, box):
num = image[box[1]:box[3], box[0]:box[2]]
return num
# face detection method
def faceDetector(orig_image, threshold = 0.7):
image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (320, 240))
image_mean = np.array([127, 127, 127])
image = (image - image_mean) / 128
image = np.transpose(image, [2, 0, 1])
image = np.expand_dims(image, axis=0)
image = image.astype(np.float32)
input_name = face_detector.get_inputs()[0].name
confidences, boxes = face_detector.run(None, {input_name: image})
boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)
return boxes, labels, probs
# ------------------------------------------------------------------------------------------------------------------------------------------------
# Main void
parser=argparse.ArgumentParser()
parser.add_argument("-i", "--image", type=str, required=False, help="input image")
args=parser.parse_args()
img_path = args.image if args.image else "dependencies/1.jpg"
color = (255, 128, 0)
orig_image = cv2.imread(img_path)
boxes, labels, probs = faceDetector(orig_image)
for i in range(boxes.shape[0]):
box = scale(boxes[i, :])
cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), color, 4)
cv2.imshow('', orig_image)
| 35.133333 | 146 | 0.588235 |
79427a245390294b40e56cd6f9de84794f6ef58f | 2,645 | py | Python | gpytorch/settings.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | gpytorch/settings.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | gpytorch/settings.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | class _feature_flag(object):
_state = False
@classmethod
def on(cls):
return cls._state
@classmethod
def _set_state(cls, state):
cls._state = state
def __init__(self, state=True):
self.prev = self.__class__.on()
self.state = state
def __enter__(self):
self.__class__._set_state(self.state)
def __exit__(self, *args):
self.__class__._set_state(self.prev)
return False
class _value_context(object):
_global_value = None
@classmethod
def value(cls):
return cls._global_value
@classmethod
def _set_value(cls, value):
cls._global_value = value
def __init__(self, value):
self._orig_value = self.__class__.value()
self._instance_value = value
def __enter__(self, ):
self.__class__._set_value(self._instance_value)
def __exit__(self, *args):
self.__class__._set_value(self._orig_value)
return False
class max_cg_iterations(_value_context):
"""
The maximum number of conjugate gradient iterations to perform (when computing matrix solves)
Higher values result in more accurate solves
Default: 20
"""
_global_value = 20
class max_lanczos_iterations(_value_context):
"""
The maximum number of Lanczos iterations to perform
This is used when 1) computing variance estimates, 2) drawing from MVNs, or
3) performing kernel multiplication
Higher values result in higher accuracy
Default: 100
"""
_global_value = 100
class max_lanczos_quadrature_iterations(_value_context):
"""
The maximum number of Lanczos iterations to perform when doing stochastic Lanczos
quadrature. This is ONLY used for log determinant calculations and computing Tr(K^{-1}dK/d\theta)
"""
_global_value = 15
class num_likelihood_samples(_value_context):
"""
The number of samples to draw from a latent GP when computing a likelihood
This is used in variational inference and training
Default: 10
"""
_global_value = 10
class num_trace_samples(_value_context):
"""
The number of samples to draw when stochastically computing the trace of a matrix
Higher values result in more accurate trace estimations
If the value is set to 0, then the trace will be deterministically computed
Default: 10
"""
_global_value = 10
class use_toeplitz(_feature_flag):
"""
Whether or not to use Toeplitz math with gridded data, grid inducing point modules
Pros: memory efficient, faster on CPU
Cons: slower on GPUs with < 10000 inducing points
"""
_state = True
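# Hedged usage sketch: each setting above acts as a context manager, e.g.
#     with max_cg_iterations(50), use_toeplitz(False):
#         ...  # code in this block sees the temporary values
#     max_cg_iterations.value()  # -> 20 again outside the block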
| 26.188119 | 101 | 0.691871 |
79427a54d575585bb232ccdef6691b5c7ca36eca | 27,055 | py | Python | sphinx_toolbox/more_autodoc/regex.py | domdfcoding/sphinx-toolbox | fe5a35d6b4fce617514c4c243ad94fb8bd86b0bf | [
"MIT"
] | 1 | 2020-09-27T15:37:27.000Z | 2020-09-27T15:37:27.000Z | sphinx_toolbox/more_autodoc/regex.py | domdfcoding/sphinx-toolbox | fe5a35d6b4fce617514c4c243ad94fb8bd86b0bf | [
"MIT"
] | 4 | 2020-08-25T19:01:19.000Z | 2020-12-11T16:58:07.000Z | sphinx_toolbox/more_autodoc/regex.py | domdfcoding/sphinx-toolbox | fe5a35d6b4fce617514c4c243ad94fb8bd86b0bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# regex.py
r"""
Specialized Documenter for regular expression variables, similar to :rst:dir:`autodata`.
.. versionadded:: 1.2.0
.. extensions:: sphinx_toolbox.more_autodoc.regex
Usage
-------
.. rst:directive:: autoregex
Directive to automatically document a regular expression variable.
The output is based on the :rst:dir:`autodata` directive, and takes all of its options except ``:annotation:``.
.. rst:directive:option:: no-value
Don't show the value of the variable.
.. rst:directive:option:: value: value
:type: string
Show this instead of the value taken from the Python source code.
.. rst:directive:option:: no-type
Don't show the type of the variable.
.. rst:directive:option:: no-flags
Don't show the flags of the :class:`~typing.Pattern` object.
.. rst:directive:option:: flags: flags
:type: string
Show this instead of the flags taken from the :class:`~typing.Pattern` object.
This should be correctly formatted for insertion into reStructuredText, such as ``:py:data:`re.ASCII```.
.. versionchanged:: 2.7.0
The flags :py:data:`re.DEBUG` and :py:data:`re.VERBOSE` are now hidden
as they don't affect the regex itself.
.. latex:clearpage::
.. rst:role:: regex
Formats a regular expression with coloured output.
.. rest-example::
:regex:`^Hello\s+[Ww]orld[.,](Lovely|Horrible) weather, isn't it (.*)?`
.. versionchanged:: 2.11.0 Now generates coloured output with the LaTeX builder.
"""
#
# Copyright © 2020-2022 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/sphinx-doc/sphinx
# | Copyright (c) 2007-2020 by the Sphinx team (see AUTHORS file).
# | BSD Licensed
# | All rights reserved.
# |
# | Redistribution and use in source and binary forms, with or without
# | modification, are permitted provided that the following conditions are
# | met:
# |
# | * Redistributions of source code must retain the above copyright
# | notice, this list of conditions and the following disclaimer.
# |
# | * Redistributions in binary form must reproduce the above copyright
# | notice, this list of conditions and the following disclaimer in the
# | documentation and/or other materials provided with the distribution.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# stdlib
import itertools
import re
import sre_parse
from sre_constants import (
ANY,
AT,
AT_BEGINNING,
AT_BEGINNING_STRING,
AT_BOUNDARY,
AT_END,
AT_END_STRING,
AT_NON_BOUNDARY,
BRANCH,
CATEGORY,
CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT,
CATEGORY_NOT_SPACE,
CATEGORY_NOT_WORD,
CATEGORY_SPACE,
CATEGORY_WORD,
IN,
LITERAL,
MAX_REPEAT,
MAXREPEAT,
MIN_REPEAT,
RANGE,
SUBPATTERN
)
from textwrap import dedent
from typing import Any, Callable, List, Optional, Pattern, Tuple
# 3rd party
import dict2css
from docutils import nodes
from docutils.nodes import Node, system_message
from domdf_python_tools.paths import PathPlus
from domdf_python_tools.stringlist import StringList
from sphinx.application import Sphinx
from sphinx.ext.autodoc import UNINITIALIZED_ATTR, ModuleDocumenter
from sphinx.util import texescape
from sphinx.util.docutils import SphinxRole
from sphinx.writers.html import HTMLTranslator
# this package
from sphinx_toolbox import _css
from sphinx_toolbox.more_autodoc.variables import VariableDocumenter
from sphinx_toolbox.utils import Config, SphinxExtMetadata, add_nbsp_substitution, flag, metadata_add_version
__all__ = (
"RegexDocumenter",
"RegexParser",
"TerminalRegexParser",
"HTMLRegexParser",
"LaTeXRegexParser",
"parse_regex_flags",
"no_formatting",
"span",
"latex_textcolor",
"copy_asset_files",
"setup",
)
class RegexDocumenter(VariableDocumenter):
"""
Specialized Documenter subclass for regex patterns.
"""
directivetype = "data"
objtype = "regex"
priority = VariableDocumenter.priority + 1
option_spec = {
**VariableDocumenter.option_spec,
"no-flag": flag,
"flag": str,
}
del option_spec["type"]
del option_spec["annotation"]
@classmethod
def can_document_member(
cls,
member: Any,
membername: str,
isattr: bool,
parent: Any,
) -> bool:
"""
Called to see if a member can be documented by this documenter.
:param member: The member being checked.
:param membername: The name of the member.
:param isattr:
:param parent: The parent of the member.
"""
return isinstance(parent, ModuleDocumenter) and isattr and isinstance(member, Pattern)
def add_content(self, more_content: Any, no_docstring: bool = False) -> None:
"""
Add content from docstrings, attribute documentation and the user.
:param more_content:
:param no_docstring:
"""
# set sourcename and add content from attribute documentation
sourcename = self.get_sourcename()
if self.analyzer:
attr_docs = self.analyzer.find_attr_docs()
if self.objpath:
key = ('.'.join(self.objpath[:-1]), self.objpath[-1])
if key in attr_docs:
no_docstring = True
# make a copy of docstring for attributes to avoid cache
# the change of autodoc-process-docstring event.
docstrings = [list(attr_docs[key])]
for i, line in enumerate(self.process_doc(docstrings)):
self.add_line(line, sourcename, i)
# add content from docstrings
if not no_docstring:
docstrings = self.get_doc() or []
if not docstrings:
# append at least a dummy docstring, so that the event
# autodoc-process-docstring is fired and can add some
# content if desired
docstrings.append([])
if docstrings == [["Compiled regular expression objects", '']] or docstrings == [[]]:
docstrings = [["Compiled regular expression object.", '']] # pylint: disable=W8301
for i, line in enumerate(self.process_doc(docstrings)):
self.add_line(line, sourcename, i)
# add additional content (e.g. from document), if present
if more_content:
for line, src in zip(more_content.data, more_content.items):
self.add_line(line, src[0], src[1])
no_value = self.options.get("no-value", False)
no_flag = self.options.get("no-flag", False)
if self.object is not UNINITIALIZED_ATTR and (not no_value or not no_flag):
self.add_line('', sourcename)
self.add_line('', sourcename)
the_flag: Optional[str] = None
if not no_flag:
if "flag" in self.options:
the_flag = self.options["flag"]
else:
raw_flags = self.object.flags
raw_flags = (raw_flags & ~re.DEBUG) & ~re.VERBOSE
the_flag = parse_regex_flags(raw_flags)
if no_value and not the_flag:
return
self.add_line(".. csv-table::", sourcename)
self.add_line(" :widths: auto", sourcename)
self.add_line(" :stub-columns: 1", sourcename)
self.add_line('', sourcename)
if not no_value:
if "value" in self.options:
the_pattern = self.options["value"]
else:
the_pattern = self.object.pattern
the_pattern = the_pattern.replace('`', r"\`")
leading_spaces = len(tuple(itertools.takewhile(str.isspace, the_pattern)))
trailing_spaces = len(tuple(itertools.takewhile(str.isspace, the_pattern[::-1])))
the_pattern = the_pattern.strip(' ')
if leading_spaces > 1:
the_pattern = f"[ ]{leading_spaces}{the_pattern}"
elif leading_spaces == 1:
the_pattern = f"[ ]{the_pattern}"
if trailing_spaces > 1:
the_pattern += f" {trailing_spaces}"
elif trailing_spaces == 1:
the_pattern += "[ ]"
self.add_line(f' **Pattern**, ":regex:`{the_pattern}`"', sourcename)
if the_flag:
self.add_line(f" **Flags**, {the_flag}", sourcename)
self.add_line('', sourcename)
def add_directive_header(self, sig: str) -> None:
"""
Add the directive's header.
:param sig:
"""
user_no_value = self.options.get("no-value", False)
self.options["no-value"] = True
super().add_directive_header(sig)
self.options["no-value"] = user_no_value
def parse_regex_flags(flags: int) -> str:
"""
Convert regex flags into "bitwise-or'd" Sphinx xrefs.
:param flags:
"""
buf = []
if flags & re.ASCII:
buf.append("ASCII")
if flags & re.DEBUG:
buf.append("DEBUG")
if flags & re.IGNORECASE:
buf.append("IGNORECASE")
if flags & re.LOCALE:
buf.append("LOCALE")
if flags & re.MULTILINE:
buf.append("MULTILINE")
if flags & re.DOTALL:
buf.append("DOTALL")
if flags & re.VERBOSE:
buf.append("VERBOSE")
return " ``|`` ".join(f":py:data:`re.{x}`" for x in buf)
def no_formatting(value: Any) -> str:
"""
No-op that returns the value as a string.
Used for unformatted output.
"""
return str(value)
class RegexParser:
r"""
Parser for regular expressions that outputs coloured output.
The formatting is controlled by the following callable attributes:
* ``AT_COLOUR`` -- Used for e.g. :regex:`^\A\b\B\Z$`
* ``SUBPATTERN_COLOUR`` -- Used for the parentheses around subpatterns, e.g. :regex:`(Hello) World`
* ``IN_COLOUR`` -- Used for the square brackets around character sets, e.g. :regex:`[Hh]ello`
* ``REPEAT_COLOUR`` -- Used for repeats, e.g. :regex:`A?B+C*D{2,4}E{5}`
* ``REPEAT_BRACE_COLOUR`` -- Used for the braces around numerical repeats.
* ``CATEGORY_COLOUR`` -- Used for categories, e.g. :regex:`\d\D\s\D\w\W`
* ``BRANCH_COLOUR`` -- Used for branches, e.g. :regex:`(Lovely|Horrible) Weather`
* ``LITERAL_COLOUR`` -- Used for literal characters.
* ``ANY_COLOUR`` -- Used for the "any" dot.
These are all :class:`~typing.Callable`\[[:class:`~typing.Any`], :class:`str`\].
By default no formatting is performed.
Subclasses should set these attributes to appropriate functions.
"""
# Colours
AT_COLOUR: Callable[[Any], str] = no_formatting
SUBPATTERN_COLOUR: Callable[[Any], str] = no_formatting
IN_COLOUR: Callable[[Any], str] = no_formatting
REPEAT_COLOUR: Callable[[Any], str] = no_formatting
REPEAT_BRACE_COLOUR: Callable[[Any], str] = no_formatting
CATEGORY_COLOUR: Callable[[Any], str] = no_formatting
BRANCH_COLOUR: Callable[[Any], str] = no_formatting
LITERAL_COLOUR: Callable[[Any], str] = no_formatting
ANY_COLOUR: Callable[[Any], str] = no_formatting
def parse_pattern(self, regex: Pattern) -> str:
"""
Parse the given regular expression and return the formatted pattern.
:param regex:
"""
buf = []
def _parse_pattern(pattern) -> None: # noqa: MAN001
for what, content in pattern:
# print(what, content)
if what is AT:
if content is AT_BEGINNING:
buf.append(type(self).AT_COLOUR('^'))
continue
elif content is AT_END:
buf.append(type(self).AT_COLOUR('$'))
continue
elif content is AT_BEGINNING_STRING:
buf.append(type(self).AT_COLOUR(r"\A"))
continue
elif content is AT_BOUNDARY:
buf.append(type(self).AT_COLOUR(r"\b"))
continue
elif content is AT_NON_BOUNDARY:
buf.append(type(self).AT_COLOUR(r"\B"))
continue
elif content is AT_END_STRING:
buf.append(type(self).AT_COLOUR(r"\Z"))
continue
if what is SUBPATTERN:
buf.append(type(self).SUBPATTERN_COLOUR('('))
group, add_flags, del_flags, subpattern = content
# print(group, add_flags, del_flags)
_parse_pattern(subpattern)
buf.append(type(self).SUBPATTERN_COLOUR(')'))
continue
if what is LITERAL:
# TODO: escape characters that have meaning to avoid ambiguity
buf.append(type(self).LITERAL_COLOUR(chr(content)))
continue
if what is IN:
if len(content) > 1 or content[0][0] is RANGE:
buf.append(type(self).IN_COLOUR('['))
_parse_pattern(content)
if len(content) > 1 or content[0][0] is RANGE:
buf.append(type(self).IN_COLOUR(']'))
continue
if what is MAX_REPEAT or what is MIN_REPEAT:
min_, max_, item = content
_parse_pattern(item)
if min_ == 0 and max_ is MAXREPEAT:
buf.append(type(self).REPEAT_COLOUR('*'))
elif min_ == 1 and max_ is MAXREPEAT:
buf.append(type(self).REPEAT_COLOUR('+'))
elif min_ == 0 and max_ == 1:
buf.append(type(self).REPEAT_COLOUR('?'))
elif min_ == max_:
buf.append(type(self).REPEAT_BRACE_COLOUR('{'))
buf.append(type(self).REPEAT_COLOUR(str(min_)))
buf.append(type(self).REPEAT_BRACE_COLOUR('}'))
else:
buf.append(type(self).REPEAT_BRACE_COLOUR('{'))
buf.append(type(self).REPEAT_COLOUR(str(min_)))
buf.append(type(self).LITERAL_COLOUR(','))
buf.append(type(self).REPEAT_COLOUR(str(max_)))
buf.append(type(self).REPEAT_BRACE_COLOUR('}'))
if what is MIN_REPEAT:
buf.append(type(self).REPEAT_COLOUR('?'))
continue
#
# if what is MIN_REPEAT:
# min_, max_, item = content
# _parse_pattern(item)
# print(min_, max_, item)
# input(">>>")
if what is CATEGORY:
if content is CATEGORY_DIGIT:
buf.append(type(self).CATEGORY_COLOUR(r"\d"))
continue
elif content is CATEGORY_NOT_DIGIT:
buf.append(type(self).CATEGORY_COLOUR(r"\D"))
continue
elif content is CATEGORY_SPACE:
buf.append(type(self).CATEGORY_COLOUR(r"\s"))
continue
elif content is CATEGORY_NOT_SPACE:
buf.append(type(self).CATEGORY_COLOUR(r"\S"))
continue
elif content is CATEGORY_WORD:
buf.append(type(self).CATEGORY_COLOUR(r"\w"))
continue
elif content is CATEGORY_NOT_WORD:
buf.append(type(self).CATEGORY_COLOUR(r"\W"))
continue
if what is BRANCH:
for branch in content[1]:
_parse_pattern(branch)
buf.append(type(self).BRANCH_COLOUR('|'))
buf.pop(-1)
continue
if what is RANGE:
buf.append(type(self).LITERAL_COLOUR(chr(content[0])))
buf.append(type(self).AT_COLOUR('-'))
buf.append(type(self).LITERAL_COLOUR(chr(content[1])))
continue
if what is ANY:
buf.append(type(self).ANY_COLOUR('.'))
continue
print(what, content) # pragma: no cover
pattern = regex.pattern.replace('\t', r"\t")
# Remove leading and trailing spaces from the pattern. They will be added back at the end.
leading_spaces = len(tuple(itertools.takewhile(str.isspace, pattern)))
trailing_spaces = len(tuple(itertools.takewhile(str.isspace, pattern[::-1])))
pattern = pattern.strip(' ')
tokens: List = list(sre_parse.parse(pattern, regex.flags)) # type: ignore[call-overload]
if not leading_spaces:
while tokens[0] == (LITERAL, ord(' ')):
leading_spaces += 1
tokens.pop(0)
if not trailing_spaces:
while tokens[-1] == (LITERAL, ord(' ')):
trailing_spaces += 1
tokens.pop(-1)
if leading_spaces:
buf.append(type(self).IN_COLOUR('['))
buf.append(type(self).LITERAL_COLOUR(' '))
buf.append(type(self).IN_COLOUR(']'))
if leading_spaces > 1:
buf.append(type(self).REPEAT_BRACE_COLOUR('{'))
buf.append(type(self).REPEAT_COLOUR(str(leading_spaces)))
buf.append(type(self).REPEAT_BRACE_COLOUR('}'))
_parse_pattern(tokens)
if trailing_spaces == 1:
buf.append(type(self).IN_COLOUR('['))
buf.append(type(self).LITERAL_COLOUR(' '))
buf.append(type(self).IN_COLOUR(']'))
elif trailing_spaces > 1:
buf.append(type(self).LITERAL_COLOUR(' '))
buf.append(type(self).REPEAT_BRACE_COLOUR('{'))
buf.append(type(self).REPEAT_COLOUR(str(trailing_spaces)))
buf.append(type(self).REPEAT_BRACE_COLOUR('}'))
return ''.join(buf)
def span(css_class: str) -> Callable[[Any], str]:
"""
Returns a function that wraps a value in a ``span`` tag with the given class.
:param css_class:
"""
def f(value: Any) -> str:
return f'<span class="{css_class}">{value}</span>'
return f
def latex_textcolor(colour_name: str) -> Callable[[Any], str]:
"""
Returns a function that wraps a value in a LaTeX ``textcolor`` command for the given colour.
.. versionadded:: 2.11.0
:param colour_name:
"""
def f(value: Any) -> str:
if value == ' ':
return "\\enspace"
return f'\\textcolor{{{colour_name}}}{{{texescape.escape(value)}}}'
return f
class HTMLRegexParser(RegexParser):
r"""
:class:`~.RegexParser` that outputs styled HTML.
The formatting is controlled by the following functions, which
wrap the character in a ``span`` tag with an appropriate CSS class:
* ``AT_COLOUR`` -> ``regex_at`` -- Used for e.g. :regex:`^\A\b\B\Z$`
* ``SUBPATTERN_COLOUR`` -> ``regex_subpattern`` -- Used for the parentheses around subpatterns, e.g. :regex:`(Hello) World`
* ``IN_COLOUR`` -> ``regex_in`` -- Used for the square brackets around character sets, e.g. :regex:`[Hh]ello`
* ``REPEAT_COLOUR`` -> ``regex_repeat`` -- Used for repeats, e.g. :regex:`A?B+C*D{2,4}E{5}`
* ``REPEAT_BRACE_COLOUR`` -> ``regex_repeat_brace`` -- Used for the braces around numerical repeats.
* ``CATEGORY_COLOUR`` -> ``regex_category`` -- Used for categories, e.g. :regex:`\d\D\s\D\w\W`
* ``BRANCH_COLOUR`` -> ``regex_branch`` -- Used for branches, e.g. :regex:`(Lovely|Horrible) Weather`
* ``LITERAL_COLOUR`` -> ``regex_literal`` -- Used for literal characters.
* ``ANY_COLOUR`` -> ``regex_any`` -- Used for the "any" dot.
Additionally, all ``span`` tags the ``regex`` class,
and the surrounding ``code`` tag has the following classes:
``docutils literal notranslate regex``.
"""
# Colours
AT_COLOUR = span("regex regex_at")
SUBPATTERN_COLOUR = span("regex regex_subpattern")
IN_COLOUR = span("regex regex_in")
REPEAT_COLOUR = span("regex regex_repeat")
REPEAT_BRACE_COLOUR = span("regex regex_repeat_brace")
CATEGORY_COLOUR = span("regex regex_category")
BRANCH_COLOUR = span("regex regex_branch")
LITERAL_COLOUR = span("regex regex_literal")
ANY_COLOUR = span("regex regex_any")
def parse_pattern(self, regex: Pattern) -> str:
"""
Parse the given regular expression and return the formatted pattern.
:param regex:
"""
return dedent(
f"""
<code class="docutils literal notranslate regex">
{super().parse_pattern(regex)}
</code>
"""
)
class LaTeXRegexParser(RegexParser):
r"""
:class:`~.RegexParser` that outputs styled LaTeX.
The formatting is controlled by the following functions, which
wrap the character in a LaTeX ``textcolor`` command for an appropriate colour:
* ``AT_COLOUR`` -> ``regex_at`` -- Used for e.g. :regex:`^\A\b\B\Z$`
* ``SUBPATTERN_COLOUR`` -> ``regex_subpattern`` -- Used for the parentheses around subpatterns, e.g. :regex:`(Hello) World`
* ``IN_COLOUR`` -> ``regex_in`` -- Used for the square brackets around character sets, e.g. :regex:`[Hh]ello`
* ``REPEAT_COLOUR`` -> ``regex_repeat`` -- Used for repeats, e.g. :regex:`A?B+C*D{2,4}E{5}`
* ``REPEAT_BRACE_COLOUR`` -> ``regex_repeat_brace`` -- Used for the braces around numerical repeats.
* ``CATEGORY_COLOUR`` -> ``regex_category`` -- Used for categories, e.g. :regex:`\d\D\s\D\w\W`
* ``BRANCH_COLOUR`` -> ``regex_branch`` -- Used for branches, e.g. :regex:`(Lovely|Horrible) Weather`
* ``LITERAL_COLOUR`` -> ``regex_literal`` -- Used for literal characters.
* ``ANY_COLOUR`` -> ``regex_any`` -- Used for the "any" dot.
.. versionadded:: 2.11.0
"""
# Colours
AT_COLOUR = latex_textcolor("regex_at")
SUBPATTERN_COLOUR = latex_textcolor("regex_subpattern")
IN_COLOUR = latex_textcolor("regex_in")
REPEAT_COLOUR = latex_textcolor("regex_repeat")
REPEAT_BRACE_COLOUR = latex_textcolor("regex_repeat_brace")
CATEGORY_COLOUR = latex_textcolor("regex_category")
BRANCH_COLOUR = latex_textcolor("regex_branch")
LITERAL_COLOUR = latex_textcolor("regex_literal")
ANY_COLOUR = latex_textcolor("regex_any")
def parse_pattern(self, regex: Pattern) -> str:
"""
Parse the given regular expression and return the formatted pattern.
:param regex:
"""
return f"\\sphinxcode{{\\sphinxupquote{{{super().parse_pattern(regex)}}}}}"
class TerminalRegexParser(RegexParser):
r"""
:class:`~.RegexParser` that outputs ANSI coloured output for the terminal.
The formatting is controlled by the following callable attributes,
which set ANSI escape codes for the appropriate colour:
* ``AT_COLOUR`` -> YELLOW, Used for e.g. :regex:`^\A\b\B\Z$`
* ``SUBPATTERN_COLOUR`` -> LIGHTYELLOW_EX, Used for the parentheses around subpatterns, e.g. :regex:`(Hello) World`
* ``IN_COLOUR`` -> LIGHTRED_EX, Used for the square brackets around character sets, e.g. :regex:`[Hh]ello`
* ``REPEAT_COLOUR`` -> LIGHTBLUE_EX, Used for repeats, e.g. :regex:`A?B+C*D{2,4}E{5}`
* ``REPEAT_BRACE_COLOUR`` -> YELLOW, Used for the braces around numerical repeats.
* ``CATEGORY_COLOUR`` -> LIGHTYELLOW_EX, Used for categories, e.g. :regex:`\d\D\s\D\w\W`
* ``BRANCH_COLOUR`` -> YELLOW, Used for branches, e.g. :regex:`(Lovely|Horrible) Weather`
* ``LITERAL_COLOUR`` -> GREEN, Used for literal characters.
* ``ANY_COLOUR`` -> YELLOW, Used for the "any" dot.
"""
# Colours
@staticmethod
def AT_COLOUR(s: str) -> str: # noqa: D102
return f"\x1b[33m{s}\x1b[39m"
@staticmethod
def SUBPATTERN_COLOUR(s: str) -> str: # noqa: D102
return f"\x1b[93m{s}\x1b[39m"
@staticmethod
def IN_COLOUR(s: str) -> str: # noqa: D102
return f"\x1b[91m{s}\x1b[39m"
@staticmethod
def REPEAT_COLOUR(s: str) -> str: # noqa: D102
return f"\x1b[94m{s}\x1b[39m"
@staticmethod
def LITERAL_COLOUR(s: str) -> str: # noqa: D102
return f"\x1b[32m{s}\x1b[39m"
REPEAT_BRACE_COLOUR = BRANCH_COLOUR = ANY_COLOUR = AT_COLOUR
CATEGORY_COLOUR = SUBPATTERN_COLOUR
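# Hedged example: the terminal parser can also be used standalone, e.g.
#     TerminalRegexParser().parse_pattern(re.compile(r"[Hh]ello\s+[Ww]orld"))
# returns the pattern as a single string interspersed with ANSI colour escape codes.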
class RegexNode(nodes.literal):
"""
Docutils Node to show a highlighted regular expression.
"""
def __init__(self, rawsource: str = '', text: str = '', *children, **attributes) -> None:
super().__init__(rawsource, text, *children, **attributes)
self.pattern = re.compile(':'.join(rawsource.split(':')[2:])[1:-1])
class Regex(SphinxRole):
"""
Docutils role to show a highlighted regular expression.
"""
def run(self) -> Tuple[List[Node], List[system_message]]:
"""
Process the content of the regex role.
"""
options = self.options.copy()
return [RegexNode(self.rawtext, self.text, **options)], []
def visit_regex_node(translator: HTMLTranslator, node: RegexNode) -> None:
"""
Visit an :class:`~.RegexNode`.
:param translator:
:param node: The node being visited.
"""
translator.body.append(regex_parser.parse_pattern(node.pattern))
def depart_regex_node(translator: HTMLTranslator, node: RegexNode) -> None:
"""
Depart an :class:`~.RegexNode`.
:param translator:
:param node: The node being visited.
"""
translator.body.pop(-1)
def visit_regex_node_latex(translator: HTMLTranslator, node: RegexNode) -> None:
"""
Visit an :class:`~.RegexNode` with the LaTeX builder.
.. versionadded:: 2.11.0
:param translator:
:param node: The node being visited.
"""
translator.body.append(latex_regex_parser.parse_pattern(node.pattern))
def depart_regex_node_latex(translator: HTMLTranslator, node: RegexNode) -> None:
"""
Depart an :class:`~.RegexNode` with the LaTeX builder.
.. versionadded:: 2.11.0
:param translator:
:param node: The node being visited.
"""
translator.body.pop(-1)
def copy_asset_files(app: Sphinx, exception: Optional[Exception] = None) -> None:
"""
Copy additional stylesheets into the HTML build directory.
:param app: The Sphinx application.
:param exception: Any exception which occurred and caused Sphinx to abort.
"""
if exception: # pragma: no cover
return
if app.builder is None or app.builder.format.lower() != "html": # pragma: no cover
return
static_dir = PathPlus(app.outdir) / "_static"
static_dir.maybe_make(parents=True)
dict2css.dump(_css.regex_styles, static_dir / "regex.css", minify=True)
regex_parser = HTMLRegexParser()
latex_regex_parser = LaTeXRegexParser()
def configure(app: Sphinx, config: Config) -> None:
"""
Configure :mod:`sphinx_toolbox.code`.
.. versionadded:: 2.11.0
:param app: The Sphinx application.
:param config:
"""
latex_elements = getattr(app.config, "latex_elements", {})
latex_preamble = StringList(latex_elements.get("preamble", ''))
latex_preamble.blankline()
latex_preamble.append(r"\definecolor{regex_literal}{HTML}{696969}")
latex_preamble.append(r"\definecolor{regex_at}{HTML}{FF4500}")
latex_preamble.append(r"\definecolor{regex_repeat_brace}{HTML}{FF4500}")
latex_preamble.append(r"\definecolor{regex_branch}{HTML}{FF4500}")
latex_preamble.append(r"\definecolor{regex_subpattern}{HTML}{1e90ff}")
latex_preamble.append(r"\definecolor{regex_in}{HTML}{ff8c00}")
latex_preamble.append(r"\definecolor{regex_category}{HTML}{8fbc8f}")
latex_preamble.append(r"\definecolor{regex_repeat}{HTML}{FF4500}")
latex_preamble.append(r"\definecolor{regex_any}{HTML}{FF4500}")
latex_elements["preamble"] = str(latex_preamble)
app.config.latex_elements = latex_elements # type: ignore[attr-defined]
add_nbsp_substitution(config)
@metadata_add_version
def setup(app: Sphinx) -> SphinxExtMetadata:
"""
Setup :mod:`sphinx_toolbox.more_autodoc.regex`.
:param app: The Sphinx application.
"""
app.setup_extension("sphinx.ext.autodoc")
app.setup_extension("sphinx_toolbox._css")
app.connect("config-inited", configure)
app.add_autodocumenter(RegexDocumenter)
app.add_role("regex", Regex())
app.add_node(
RegexNode,
html=(visit_regex_node, depart_regex_node),
latex=(visit_regex_node_latex, depart_regex_node_latex)
)
return {"parallel_read_safe": True}
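# Minimal usage sketch (assumed conf.py snippet, not part of this module): enable the
# extension and, if needed, drive the module-level HTML parser directly.
#
#   # conf.py
#   extensions = ["sphinx_toolbox.more_autodoc.regex"]
#
#   # rendering a pattern by hand:
#   import re
#   html = regex_parser.parse_pattern(re.compile(r"(Lovely|Horrible) Weather"))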
| 30.884703 | 124 | 0.69854 |
79427a885417c92efe763249ac490326d737d769 | 1,027 | py | Python | 14_exception/08_except_info.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 14_exception/08_except_info.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 14_exception/08_except_info.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | #! /root/anaconda3/bin/python
"""
标准库模块sys中的函数exc_info以获取异常的相关信息
异常的类型: <class 'ZeroDivisionError'>
异常的错误信息:division by zero
异常调用堆栈的跟踪信息:<traceback object at 0x7f689cbf5180>
提取Traceback对象中包含的信息,调用标准库traceback中的函数extract_tb()
[<FrameSummary file ./08_except_info.py, line 11 in f2>, <FrameSummary file ./08_except_info.py, line 7 in f1>]
文件名:./08_except_info.py
行数:11
函数名:f2
源码:f1()
文件名:./08_except_info.py
行数:7
函数名:f1
源码:print(1 / 0)
"""
import sys
import traceback
def f1():
print(1 / 0)
def f2():
try:
f1()
except ZeroDivisionError:
ex_type, ex_value, ex_traceback = sys.exc_info()
print('异常的类型: %s' % ex_type)
print('异常的错误信息:%s' % ex_value)
print('异常调用堆栈的跟踪信息:%s' % ex_traceback)
tb = traceback.extract_tb(ex_traceback)
print(tb)
for filename, linenum, funcname, source in tb:
print('文件名:%s' % filename)
print('行数:%s' % linenum)
print('函数名:%s' % funcname)
print('源码:%s' % source)
f2()
| 19.377358 | 111 | 0.638754 |
79427b041f818bbdb52d9c92f760eaed786e95fd | 4,763 | py | Python | protoc_gen_swagger/options/annotations_pb2.py | universe-proton/protoc-gen-swagger | b572618d0aadcef63224bf85ebba05270b573a53 | [
"Apache-2.0"
] | 5 | 2018-01-29T12:55:41.000Z | 2020-05-27T09:10:33.000Z | protoc_gen_swagger/options/annotations_pb2.py | universe-proton/protoc-gen-swagger | b572618d0aadcef63224bf85ebba05270b573a53 | [
"Apache-2.0"
] | null | null | null | protoc_gen_swagger/options/annotations_pb2.py | universe-proton/protoc-gen-swagger | b572618d0aadcef63224bf85ebba05270b573a53 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protoc-gen-swagger/options/annotations.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protoc_gen_swagger.options import openapiv2_pb2 as protoc__gen__swagger_dot_options_dot_openapiv2__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protoc-gen-swagger/options/annotations.proto',
package='grpc.gateway.protoc_gen_swagger.options',
syntax='proto3',
serialized_pb=_b('\n,protoc-gen-swagger/options/annotations.proto\x12\'grpc.gateway.protoc_gen_swagger.options\x1a*protoc-gen-swagger/options/openapiv2.proto\x1a google/protobuf/descriptor.proto:j\n\x11openapiv2_swagger\x12\x1c.google.protobuf.FileOptions\x18\x92\x08 \x01(\x0b\x32\x30.grpc.gateway.protoc_gen_swagger.options.Swagger:p\n\x13openapiv2_operation\x12\x1e.google.protobuf.MethodOptions\x18\x92\x08 \x01(\x0b\x32\x32.grpc.gateway.protoc_gen_swagger.options.Operation:k\n\x10openapiv2_schema\x12\x1f.google.protobuf.MessageOptions\x18\x92\x08 \x01(\x0b\x32/.grpc.gateway.protoc_gen_swagger.options.Schema:e\n\ropenapiv2_tag\x12\x1f.google.protobuf.ServiceOptions\x18\x92\x08 \x01(\x0b\x32,.grpc.gateway.protoc_gen_swagger.options.TagBCZAgithub.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/optionsb\x06proto3')
,
dependencies=[protoc__gen__swagger_dot_options_dot_openapiv2__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
OPENAPIV2_SWAGGER_FIELD_NUMBER = 1042
openapiv2_swagger = _descriptor.FieldDescriptor(
name='openapiv2_swagger', full_name='grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger', index=0,
number=1042, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
OPENAPIV2_OPERATION_FIELD_NUMBER = 1042
openapiv2_operation = _descriptor.FieldDescriptor(
name='openapiv2_operation', full_name='grpc.gateway.protoc_gen_swagger.options.openapiv2_operation', index=1,
number=1042, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
OPENAPIV2_SCHEMA_FIELD_NUMBER = 1042
openapiv2_schema = _descriptor.FieldDescriptor(
name='openapiv2_schema', full_name='grpc.gateway.protoc_gen_swagger.options.openapiv2_schema', index=2,
number=1042, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
OPENAPIV2_TAG_FIELD_NUMBER = 1042
openapiv2_tag = _descriptor.FieldDescriptor(
name='openapiv2_tag', full_name='grpc.gateway.protoc_gen_swagger.options.openapiv2_tag', index=3,
number=1042, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
DESCRIPTOR.extensions_by_name['openapiv2_swagger'] = openapiv2_swagger
DESCRIPTOR.extensions_by_name['openapiv2_operation'] = openapiv2_operation
DESCRIPTOR.extensions_by_name['openapiv2_schema'] = openapiv2_schema
DESCRIPTOR.extensions_by_name['openapiv2_tag'] = openapiv2_tag
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
openapiv2_swagger.message_type = protoc__gen__swagger_dot_options_dot_openapiv2__pb2._SWAGGER
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(openapiv2_swagger)
openapiv2_operation.message_type = protoc__gen__swagger_dot_options_dot_openapiv2__pb2._OPERATION
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(openapiv2_operation)
openapiv2_schema.message_type = protoc__gen__swagger_dot_options_dot_openapiv2__pb2._SCHEMA
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(openapiv2_schema)
openapiv2_tag.message_type = protoc__gen__swagger_dot_options_dot_openapiv2__pb2._TAG
google_dot_protobuf_dot_descriptor__pb2.ServiceOptions.RegisterExtension(openapiv2_tag)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('ZAgithub.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options'))
# @@protoc_insertion_point(module_scope)
| 59.5375 | 828 | 0.847995 |
79427b2c2339273f0918226289626cde98784e6c | 22,822 | py | Python | test_data/samples/sqlalchemy_base_output.py | jgberry/ssort | 65c4b0a1f2e9e93e65855967f9a438046b24d9e1 | [
"MIT"
] | null | null | null | test_data/samples/sqlalchemy_base_output.py | jgberry/ssort | 65c4b0a1f2e9e93e65855967f9a438046b24d9e1 | [
"MIT"
] | null | null | null | test_data/samples/sqlalchemy_base_output.py | jgberry/ssort | 65c4b0a1f2e9e93e65855967f9a438046b24d9e1 | [
"MIT"
] | null | null | null | # Taken from SQLAlchemy
# sql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Foundational utilities common to many sql modules.
"""
import itertools
import re
from .visitors import ClauseVisitor
from .. import exc
from .. import util
PARSE_AUTOCOMMIT = util.symbol("PARSE_AUTOCOMMIT")
NO_ARG = util.symbol("NO_ARG")
class Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
class _DialectArgView(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments in the form
<dialectname>_<argument_name>.
"""
def __init__(self, obj):
self.obj = obj
def _key(self, key):
try:
dialect, value_key = key.split("_", 1)
except ValueError as err:
util.raise_(KeyError(key), replace_context=err)
else:
return dialect, value_key
def __getitem__(self, key):
dialect, value_key = self._key(key)
try:
opt = self.obj.dialect_options[dialect]
except exc.NoSuchModuleError as err:
util.raise_(KeyError(key), replace_context=err)
else:
return opt[value_key]
def __setitem__(self, key, value):
try:
dialect, value_key = self._key(key)
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Keys must be of the form <dialectname>_<argname>"
),
replace_context=err,
)
else:
self.obj.dialect_options[dialect][value_key] = value
def __delitem__(self, key):
dialect, value_key = self._key(key)
del self.obj.dialect_options[dialect][value_key]
def __iter__(self):
return (
util.safe_kwarg("%s_%s" % (dialect_name, value_name))
for dialect_name in self.obj.dialect_options
for value_name in self.obj.dialect_options[
dialect_name
]._non_defaults
)
def __len__(self):
return sum(
len(args._non_defaults)
for args in self.obj.dialect_options.values()
)
class _DialectArgDict(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments for a specific
dialect.
Maintains a separate collection of user-specified arguments
and dialect-specified default arguments.
"""
def __init__(self):
self._non_defaults = {}
self._defaults = {}
def __getitem__(self, key):
if key in self._non_defaults:
return self._non_defaults[key]
else:
return self._defaults[key]
def __setitem__(self, key, value):
self._non_defaults[key] = value
def __delitem__(self, key):
del self._non_defaults[key]
def __iter__(self):
return iter(set(self._non_defaults).union(self._defaults))
def __len__(self):
return len(set(self._non_defaults).union(self._defaults))
class DialectKWArgs(object):
"""Establish the ability for a class to have dialect-specific arguments
with defaults and constructor validation.
The :class:`.DialectKWArgs` interacts with the
:attr:`.DefaultDialect.construct_arguments` present on a dialect.
.. seealso::
:attr:`.DefaultDialect.construct_arguments`
"""
@util.dependencies("sqlalchemy.dialects")
def _kw_reg_for_dialect(dialects, dialect_name):
dialect_cls = dialects.registry.load(dialect_name)
if dialect_cls.construct_arguments is None:
return None
return dict(dialect_cls.construct_arguments)
_kw_registry = util.PopulateDict(_kw_reg_for_dialect)
@classmethod
def argument_for(cls, dialect_name, argument_name, default):
"""Add a new kind of dialect-specific keyword argument for this class.
E.g.::
Index.argument_for("mydialect", "length", None)
some_index = Index('a', 'b', mydialect_length=5)
The :meth:`.DialectKWArgs.argument_for` method is a per-argument
        way of adding extra arguments to the
:attr:`.DefaultDialect.construct_arguments` dictionary. This
dictionary provides a list of argument names accepted by various
schema-level constructs on behalf of a dialect.
New dialects should typically specify this dictionary all at once as a
data member of the dialect class. The use case for ad-hoc addition of
argument names is typically for end-user code that is also using
a custom compilation scheme which consumes the additional arguments.
:param dialect_name: name of a dialect. The dialect must be
locatable, else a :class:`.NoSuchModuleError` is raised. The
dialect must also include an existing
:attr:`.DefaultDialect.construct_arguments` collection, indicating
that it participates in the keyword-argument validation and default
system, else :class:`.ArgumentError` is raised. If the dialect does
not include this collection, then any keyword argument can be
specified on behalf of this dialect already. All dialects packaged
within SQLAlchemy include this collection, however for third party
dialects, support may vary.
:param argument_name: name of the parameter.
:param default: default value of the parameter.
.. versionadded:: 0.9.4
"""
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
if construct_arg_dictionary is None:
raise exc.ArgumentError(
"Dialect '%s' does have keyword-argument "
"validation and defaults enabled configured" % dialect_name
)
if cls not in construct_arg_dictionary:
construct_arg_dictionary[cls] = {}
construct_arg_dictionary[cls][argument_name] = default
@util.memoized_property
def dialect_kwargs(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
The arguments are present here in their original ``<dialect>_<kwarg>``
format. Only arguments that were actually passed are included;
unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
contains all options known by this dialect including defaults.
The collection is also writable; keys are accepted of the
form ``<dialect>_<kwarg>`` where the value will be assembled
into the list of options.
.. versionadded:: 0.9.2
.. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
collection is now writable.
.. seealso::
:attr:`.DialectKWArgs.dialect_options` - nested dictionary form
"""
return _DialectArgView(self)
@property
def kwargs(self):
"""A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
return self.dialect_kwargs
def _kw_reg_for_dialect_cls(self, dialect_name):
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
d = _DialectArgDict()
if construct_arg_dictionary is None:
d._defaults.update({"*": None})
else:
for cls in reversed(self.__class__.__mro__):
if cls in construct_arg_dictionary:
d._defaults.update(construct_arg_dictionary[cls])
return d
@util.memoized_property
def dialect_options(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
This is a two-level nested registry, keyed to ``<dialect_name>``
and ``<argument_name>``. For example, the ``postgresql_where``
argument would be locatable as::
arg = my_object.dialect_options['postgresql']['where']
.. versionadded:: 0.9.2
.. seealso::
:attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form
"""
return util.PopulateDict(
util.portable_instancemethod(self._kw_reg_for_dialect_cls)
)
def _validate_dialect_kwargs(self, kwargs):
# validate remaining kwargs that they all specify DB prefixes
if not kwargs:
return
for k in kwargs:
m = re.match("^(.+?)_(.+)$", k)
if not m:
raise TypeError(
"Additional arguments should be "
"named <dialectname>_<argument>, got '%s'" % k
)
dialect_name, arg_name = m.group(1, 2)
try:
construct_arg_dictionary = self.dialect_options[dialect_name]
except exc.NoSuchModuleError:
util.warn(
"Can't validate argument %r; can't "
"locate any SQLAlchemy dialect named %r"
% (k, dialect_name)
)
self.dialect_options[dialect_name] = d = _DialectArgDict()
d._defaults.update({"*": None})
d._non_defaults[arg_name] = kwargs[k]
else:
if (
"*" not in construct_arg_dictionary
and arg_name not in construct_arg_dictionary
):
raise exc.ArgumentError(
"Argument %r is not accepted by "
"dialect %r on behalf of %r"
% (k, dialect_name, self.__class__)
)
else:
construct_arg_dictionary[arg_name] = kwargs[k]
class Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
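# Sketch of the generative pattern (hypothetical subclass, not part of SQLAlchemy):
# each @_generative method receives the copy produced by _generate(), mutates it and
# returns it, leaving the original statement object untouched.
#
#   class Options(Generative):
#       _limit = None
#
#       @_generative
#       def limit(self, value):
#           self._limit = value
#
#   base = Options()
#   assert base.limit(10)._limit == 10 and base._limit is None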
class Executable(Generative):
"""Mark a ClauseElement as supporting execution.
:class:`.Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.immutabledict()
_bind = None
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Execution options can be set on a per-statement or
per :class:`_engine.Connection` basis. Additionally, the
:class:`_engine.Engine` and ORM :class:`~.orm.query.Query`
objects provide
access to execution options which they in turn configure upon
connections.
The :meth:`execution_options` method is generative. A new
instance of this statement is returned that contains the options::
statement = select([table.c.x, table.c.y])
statement = statement.execution_options(autocommit=True)
Note that only a subset of possible execution options can be applied
to a statement - these include "autocommit" and "stream_results",
but not "isolation_level" or "compiled_cache".
See :meth:`_engine.Connection.execution_options` for a full list of
possible options.
.. seealso::
:meth:`_engine.Connection.execution_options`
:meth:`_query.Query.execution_options`
:meth:`.Executable.get_execution_options`
"""
if "isolation_level" in kw:
raise exc.ArgumentError(
"'isolation_level' execution option may only be specified "
"on Connection.execution_options(), or "
"per-engine using the isolation_level "
"argument to create_engine()."
)
if "compiled_cache" in kw:
raise exc.ArgumentError(
"'compiled_cache' execution option may only be specified "
"on Connection.execution_options(), not per statement."
)
self._execution_options = self._execution_options.union(kw)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Executable.execution_options`
"""
return self._execution_options
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`.
"""
e = self.bind
if e is None:
label = getattr(self, "description", self.__class__.__name__)
msg = (
"This %s is not directly bound to a Connection or Engine. "
"Use the .execute() method of a Connection or Engine "
"to execute this construct." % label
)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@property
def bind(self):
"""Returns the :class:`_engine.Engine` or :class:`_engine.Connection`
to
which this :class:`.Executable` is bound, or None if none found.
This is a traversal which checks locally, then
checks among the "from" clauses of associated objects
until a bound engine or connection is found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
class SchemaEventTarget(object):
"""Base class for elements that are the targets of :class:`.DDLEvents`
events.
This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
"""
def _set_parent(self, parent):
"""Associate with this SchemaEvent's parent object."""
def _set_parent_with_dispatch(self, parent):
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {"schema_visitor": True}
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
__slots__ = "_all_columns"
def __init__(self, *columns):
super(ColumnCollection, self).__init__()
object.__setattr__(self, "_all_columns", [])
for c in columns:
self.add(c)
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
the new column under the name 'columnname'.
Used by schema.Column to override columns during table reflection.
"""
remove_col = None
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
remove_col = other
del self._data[other.key]
if column.key in self._data:
remove_col = self._data[column.key]
self._data[column.key] = column
if remove_col is not None:
self._all_columns[:] = [
column if c is remove_col else c for c in self._all_columns
]
else:
self._all_columns.append(column)
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
if not column.key:
raise exc.ArgumentError(
"Can't add unnamed column to column collection"
)
self[column.key] = column
def clear(self):
raise NotImplementedError()
def remove(self, column):
del self._data[column.key]
self._all_columns[:] = [
c for c in self._all_columns if c is not column
]
def update(self, iter_):
cols = list(iter_)
all_col_set = set(self._all_columns)
self._all_columns.extend(
c for label, c in cols if c not in all_col_set
)
self._data.update((label, c) for label, c in cols)
def extend(self, iter_):
cols = list(iter_)
all_col_set = set(self._all_columns)
self._all_columns.extend(c for c in cols if c not in all_col_set)
self._data.update((c.key, c) for c in cols)
def contains_column(self, col):
return col in set(self._all_columns)
def as_immutable(self):
return ImmutableColumnCollection(self._data, self._all_columns)
def __setattr__(self, key, obj):
raise NotImplementedError()
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements
# which have conflicting column names in their exported
# columns collection
existing = self[key]
if existing is value:
return
if not existing.shares_lineage(value):
util.warn(
"Column %r on table %r being replaced by "
"%r, which has the same key. Consider "
"use_labels for select() statements."
% (key, getattr(existing, "table", None), value)
)
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
util.memoized_property.reset(value, "proxy_set")
self._all_columns.append(value)
self._data[key] = value
def __delitem__(self, key):
raise NotImplementedError()
def __contains__(self, other):
if not isinstance(other, util.string_types):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in getattr(other, "_all_columns", other):
for local in self._all_columns:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
__hash__ = None
def __getstate__(self):
return {"_data": self._data, "_all_columns": self._all_columns}
def __setstate__(self, state):
object.__setattr__(self, "_data", state["_data"])
object.__setattr__(self, "_all_columns", state["_all_columns"])
def __str__(self):
return repr([str(c) for c in self])
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
extend = remove = util.ImmutableProperties._immutable
def __init__(self, data, all_columns):
util.ImmutableProperties.__init__(self, data)
object.__setattr__(self, "_all_columns", all_columns)
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(
schemaitem, "fullname", getattr(schemaitem, "name", None)
)
if label:
item = "%s object %r" % (name, label)
else:
item = "%s object" % name
if msg is None:
msg = (
"%s is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against." % item
)
raise exc.UnboundExecutionError(msg)
return bind
| 32.696275 | 78 | 0.62225 |
79427b8aa41aad4eba456c91c479ab0a9aae64dd | 858 | py | Python | tests/test_add_contact.py | leanor13/python_training | 0e933a35f0829ee5775158049d5d252fdc54a054 | [
"Apache-2.0"
] | null | null | null | tests/test_add_contact.py | leanor13/python_training | 0e933a35f0829ee5775158049d5d252fdc54a054 | [
"Apache-2.0"
] | null | null | null | tests/test_add_contact.py | leanor13/python_training | 0e933a35f0829ee5775158049d5d252fdc54a054 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
import re
def test_contact_creation(app, db, json_contacts, check_ui):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contact.create(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
def clean(cn):
return Contact(contact_id=cn.contact_id, first_name=re.sub(r'\s+', ' ', cn.first_name.strip()),
last_name=re.sub(r'\s+', ' ', cn.last_name.strip()))
ui_contacts = sorted(app.contact.get_simple_contact_list(), key=Contact.id_or_max)
db_contacts = sorted(map(clean, db.get_contact_list()), key=Contact.id_or_max)
assert ui_contacts == db_contacts
| 39 | 107 | 0.681818 |
79427c369f7da1ddfbe00ae2bcb867cb1ee93881 | 3,406 | py | Python | env/Lib/site-packages/plotly/graph_objs/histogram2d/_marker.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | packages/python/plotly/plotly/graph_objs/histogram2d/_marker.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 14 | 2021-10-20T23:33:47.000Z | 2021-12-21T04:50:37.000Z | packages/python/plotly/plotly/graph_objs/histogram2d/_marker.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 1 | 2021-11-29T22:55:05.000Z | 2021-11-29T22:55:05.000Z | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2d"
_path_str = "histogram2d.marker"
_valid_props = {"color", "colorsrc"}
# color
# -----
@property
def color(self):
"""
Sets the aggregation data.
The 'color' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
"""
def __init__(self, arg=None, color=None, colorsrc=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.Marker`
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2d.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 26.2 | 82 | 0.521726 |
79427c81f654f0c7776ef35323f6a43d8019d898 | 8,869 | py | Python | code/test/02_processing_bootstrapping.py | data-intelligence-for-health-lab/delirium_prediction | a0a25819ef6c98e32563b4e3b986c1a26fc30ed7 | [
"MIT"
] | null | null | null | code/test/02_processing_bootstrapping.py | data-intelligence-for-health-lab/delirium_prediction | a0a25819ef6c98e32563b4e3b986c1a26fc30ed7 | [
"MIT"
] | null | null | null | code/test/02_processing_bootstrapping.py | data-intelligence-for-health-lab/delirium_prediction | a0a25819ef6c98e32563b4e3b986c1a26fc30ed7 | [
"MIT"
] | null | null | null |
# --- loading libraries -------------------------------------------------------
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import random
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score, roc_auc_score, auc, precision_recall_curve
import sys
# ------------------------------------------------------ loading libraries ----
# --- setting random seed -----------------------------------------------------
seed_n = 42
np.random.seed(seed_n)
random.seed(seed_n)
tf.random.set_seed(seed_n)
# ----------------------------------------------------- setting random seed ---
# --- main routine ------------------------------------------------------------
# Argument
n = int(sys.argv[1])
# Mounting output dataframe
output = pd.DataFrame(columns = ['n',
'threshold',
'calibration',
'tn_12h',
'fp_12h',
'fn_12h',
'tp_12h',
'auc_12h',
'sensitivity_12h',
'specificity_12h',
'f1_score_12h',
'precision_12h',
'recall_12h',
'precision_recall_auc_12h',
'tn_24h',
'fp_24h',
'fn_24h',
'tp_24h',
'auc_24h',
'sensitivity_24h',
'specificity_24h',
'f1_score_24h',
'precision_24h',
'recall_24h',
'precision_recall_auc_24h',
'auc_mean',
'sensitivity_mean',
'specificity_mean',
'f1_score_mean',
'precision_mean',
'recall_mean',
'precision_recall_auc_mean'])
idx = 0
# Mounting model & data
# loading model
model = tf.keras.models.load_model('/project/M-ABeICU176709/delirium/data/outputs/models/003057/model.hdf5')
# loading data
X_adm = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/bootstrapping/X_adm5y_test_'+str(n)+'.pickle', 'rb'))
X_temp = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/bootstrapping/X_temp_test_'+str(n)+'.pickle', 'rb'))
y_12h = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/bootstrapping/y_12h_test_'+str(n)+'.pickle', 'rb'))
y_24h = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/bootstrapping/y_24h_test_'+str(n)+'.pickle', 'rb'))
# -----------------------------------------------------------------------------
# Predicting y_12h and y_24h
results = model.predict([X_adm, X_temp],
verbose = 1)
y_12h_pred = results[0]
y_24h_pred = results[1]
# Applying calibrators (isotonic regression)
# 12h
y_12h_pred = [x[0] for x in y_12h_pred]
ir_12h = pickle.load(open('/project/M-ABeICU176709/delirium/data/outputs/calibration/calibrators/3057_ir_12h.pickle', 'rb'))
y_12h_pred = ir_12h.transform(y_12h_pred)
# 24h
y_24h_pred = [x[0] for x in y_24h_pred]
ir_24h = pickle.load(open('/project/M-ABeICU176709/delirium/data/outputs/calibration/calibrators/3057_ir_24h.pickle', 'rb'))
y_24h_pred = ir_24h.transform(y_24h_pred)
# -----------------------------------------------------------------------------
# auc - 12h
auc_12h = roc_auc_score(y_true = y_12h,
y_score = y_12h_pred)
# auc - 24h
auc_24h = roc_auc_score(y_true = y_24h,
y_score = y_24h_pred)
# processing thresholds
thresholds = list(np.arange(0, 1.05, 0.05))
for threshold in thresholds:
print(f'N: {n}. Threshold: {threshold}.')
# Adjusting values to be 0 or 1 according to threshold
y_12h_pred_temp = list(map(lambda x: 1 if x >= threshold else 0, y_12h_pred))
y_24h_pred_temp = list(map(lambda x: 1 if x >= threshold else 0, y_24h_pred))
# Evaluating predictions
# confusion matrix - 12h
tn_12h, fp_12h, fn_12h, tp_12h = confusion_matrix(y_true = y_12h,
y_pred = y_12h_pred_temp).ravel()
# confusion matrix - 24h
tn_24h, fp_24h, fn_24h, tp_24h = confusion_matrix(y_true = y_24h,
y_pred = y_24h_pred_temp).ravel()
# f1-score - 12h
f1_score_12h = f1_score(y_true = y_12h,
y_pred = y_12h_pred_temp,
zero_division = 0)
# f1-score - 24h
f1_score_24h = f1_score(y_true = y_24h,
y_pred = y_24h_pred_temp,
zero_division = 0)
# precision - 12h
precision_12h = precision_score(y_true = y_12h,
y_pred = y_12h_pred_temp,
zero_division = 0)
# precision - 24h
precision_24h = precision_score(y_true = y_24h,
y_pred = y_24h_pred_temp,
zero_division = 0)
# sensitivity / recall - 12h
recall_12h = recall_score(y_true = y_12h,
y_pred = y_12h_pred_temp,
zero_division = 0)
# sensitivity / recall - 24h
recall_24h = recall_score(y_true = y_24h,
y_pred = y_24h_pred_temp,
zero_division = 0)
# precision_recall_auc 12h
precision_12h_auc, recall_12h_auc, _ = precision_recall_curve(y_true = y_12h,
probas_pred = y_12h_pred_temp)
precision_recall_auc_12h = auc(recall_12h_auc, precision_12h_auc)
# precision_recall_auc 24h
precision_24h_auc, recall_24h_auc, _ = precision_recall_curve(y_true = y_24h,
probas_pred = y_24h_pred_temp)
precision_recall_auc_24h = auc(recall_24h_auc, precision_24h_auc)
# specificity 12h
specificity_12h = tn_12h / (tn_12h + fp_12h)
# specificity 24h
specificity_24h = tn_24h / (tn_24h + fp_24h)
# -----------------------------------------------------------------------------
# Saving results to output
output.loc[idx, 'n'] = n
output.loc[idx, 'threshold'] = threshold
output.loc[idx, 'calibration'] = 'Isotonic Regression'
output.loc[idx, 'tn_12h'] = tn_12h
output.loc[idx, 'fp_12h'] = fp_12h
output.loc[idx, 'fn_12h'] = fn_12h
output.loc[idx, 'tp_12h'] = tp_12h
output.loc[idx, 'auc_12h'] = auc_12h
output.loc[idx, 'sensitivity_12h'] = recall_12h
output.loc[idx, 'specificity_12h'] = specificity_12h
output.loc[idx, 'f1_score_12h'] = f1_score_12h
output.loc[idx, 'precision_12h'] = precision_12h
output.loc[idx, 'recall_12h'] = recall_12h
output.loc[idx, 'precision_recall_auc_12h'] = precision_recall_auc_12h
output.loc[idx, 'tn_24h'] = tn_24h
output.loc[idx, 'fp_24h'] = fp_24h
output.loc[idx, 'fn_24h'] = fn_24h
output.loc[idx, 'tp_24h'] = tp_24h
output.loc[idx, 'auc_24h'] = auc_24h
output.loc[idx, 'sensitivity_24h'] = recall_24h
output.loc[idx, 'specificity_24h'] = specificity_24h
output.loc[idx, 'f1_score_24h'] = f1_score_24h
output.loc[idx, 'precision_24h'] = precision_24h
output.loc[idx, 'recall_24h'] = recall_24h
output.loc[idx, 'precision_recall_auc_24h'] = precision_recall_auc_24h
output.loc[idx, 'auc_mean'] = (auc_12h + auc_24h) / 2
output.loc[idx, 'sensitivity_mean'] = (recall_12h + recall_24h) / 2
output.loc[idx, 'specificity_mean'] = (specificity_12h + specificity_24h) / 2
output.loc[idx, 'f1_score_mean'] = (f1_score_12h + f1_score_24h) / 2
output.loc[idx, 'precision_mean'] = (precision_12h + precision_24h) / 2
output.loc[idx, 'recall_mean'] = (recall_12h + recall_24h) / 2
output.loc[idx, 'precision_recall_auc_mean'] = (precision_recall_auc_12h + precision_recall_auc_24h) / 2
# updating idx
idx += 1
# -----------------------------------------------------------------------------
# Saving results to file
output.to_csv(f'/project/M-ABeICU176709/delirium/data/outputs/test/bootstrapping/results_{str(n)}.csv', index = False)
print(output)
# ------------------------------------------------------------ main routine ---
| 42.233333 | 138 | 0.526553 |
79427d4aa9d24eb5fc484fd7a5de05e891fc7c88 | 3,787 | py | Python | code/node2vec.py | CLAIRE-COVID-T4/covid-data | ccdf1397b945b63e95768a7b91f0a7bad6e5085d | [
"CC-BY-4.0"
] | 4 | 2020-05-17T21:29:19.000Z | 2021-08-09T00:59:29.000Z | code/node2vec.py | CLAIRE-COVID-T4/covid-data | ccdf1397b945b63e95768a7b91f0a7bad6e5085d | [
"CC-BY-4.0"
] | null | null | null | code/node2vec.py | CLAIRE-COVID-T4/covid-data | ccdf1397b945b63e95768a7b91f0a7bad6e5085d | [
"CC-BY-4.0"
] | null | null | null | """ Taken from https://github.com/aditya-grover/node2vec and adapted to run on Python 3+"""
import numpy as np
import networkx as nx
import random
class Graph():
def __init__(self, nx_G, is_directed, p, q):
self.G = nx_G
self.is_directed = is_directed
self.p = p
self.q = q
def node2vec_walk(self, walk_length, start_node):
r"""
Simulate a random walk starting from start node.
"""
G = self.G
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = sorted(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])])
else:
prev = walk[-2]
next = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],
alias_edges[(prev, cur)][1])]
walk.append(next)
else:
break
return walk
def simulate_walks(self, num_walks, walk_length):
r"""Repeatedly simulate random walks from each node."""
G = self.G
walks = []
nodes = list(G.nodes())
print('Walk iteration:')
for walk_iter in range(num_walks):
print(str(walk_iter+1), '/', str(num_walks))
random.shuffle(nodes)
for node in nodes:
walks.append(self.node2vec_walk(walk_length=walk_length, start_node=node))
return walks
def get_alias_edge(self, src, dst):
r"""Get the alias edge setup lists for a given edge."""
G = self.G
p = self.p
q = self.q
unnormalized_probs = []
for dst_nbr in sorted(G.neighbors(dst)):
if dst_nbr == src:
unnormalized_probs.append(G[dst][dst_nbr]['weight']/p)
elif G.has_edge(dst_nbr, src):
unnormalized_probs.append(G[dst][dst_nbr]['weight'])
else:
unnormalized_probs.append(G[dst][dst_nbr]['weight']/q)
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
return alias_setup(normalized_probs)
def preprocess_transition_probs(self):
r"""Preprocessing of transition probabilities for guiding the random walks."""
G = self.G
is_directed = self.is_directed
alias_nodes = {}
for node in G.nodes():
unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(G.neighbors(node))]
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
alias_nodes[node] = alias_setup(normalized_probs)
alias_edges = {}
triads = {}
if is_directed:
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
else:
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
self.alias_nodes = alias_nodes
self.alias_edges = alias_edges
return
def alias_setup(probs):
r"""
Compute utility lists for non-uniform sampling from discrete distributions.
Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
for details
"""
K = len(probs)
q = np.zeros(K)
    J = np.zeros(K, dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent here
smaller = []
larger = []
for kk, prob in enumerate(probs):
q[kk] = K*prob
if q[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
J[small] = large
q[large] = q[large] + q[small] - 1.0
if q[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return J, q
def alias_draw(J, q):
r"""Draw sample from a non-uniform discrete distribution using alias sampling."""
K = len(J)
kk = int(np.floor(np.random.rand()*K))
if np.random.rand() < q[kk]:
return kk
else:
return J[kk]
| 25.246667 | 120 | 0.670452 |
79427dca3c20f750423b33788aec901c8cdb3529 | 1,582 | py | Python | extraPackages/Pillow-6.0.0/Tests/bench_cffi_access.py | dolboBobo/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 130 | 2018-02-03T10:25:54.000Z | 2022-03-25T22:27:22.000Z | extraPackages/Pillow-6.0.0/Tests/bench_cffi_access.py | doc22940/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 9 | 2018-12-14T07:31:42.000Z | 2020-12-09T20:29:28.000Z | extraPackages/Pillow-6.0.0/Tests/bench_cffi_access.py | doc22940/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | from .helper import unittest, PillowTestCase, hopper
# Not running this test by default. No DOS against Travis CI.
from PIL import PyAccess
import time
def iterate_get(size, access):
(w, h) = size
for x in range(w):
for y in range(h):
access[(x, y)]
def iterate_set(size, access):
(w, h) = size
for x in range(w):
for y in range(h):
access[(x, y)] = (x % 256, y % 256, 0)
def timer(func, label, *args):
iterations = 5000
starttime = time.time()
for x in range(iterations):
func(*args)
if time.time()-starttime > 10:
print("%s: breaking at %s iterations, %.6f per iteration" % (
label, x+1, (time.time()-starttime)/(x+1.0)))
break
if x == iterations-1:
endtime = time.time()
print("%s: %.4f s %.6f per iteration" % (
label, endtime-starttime, (endtime-starttime)/(x+1.0)))
class BenchCffiAccess(PillowTestCase):
def test_direct(self):
im = hopper()
im.load()
# im = Image.new( "RGB", (2000, 2000), (1, 3, 2))
caccess = im.im.pixel_access(False)
access = PyAccess.new(im, False)
self.assertEqual(caccess[(0, 0)], access[(0, 0)])
print("Size: %sx%s" % im.size)
timer(iterate_get, 'PyAccess - get', im.size, access)
timer(iterate_set, 'PyAccess - set', im.size, access)
timer(iterate_get, 'C-api - get', im.size, caccess)
timer(iterate_set, 'C-api - set', im.size, caccess)
if __name__ == '__main__':
unittest.main()
| 26.813559 | 73 | 0.565107 |
79427e0cfea1bb9873fbcd6a20700f01493fe03f | 1,289 | py | Python | utils/others.py | MSwML/ML-guided-material-synthesis | 8c0ae4a4f6403bcc6833e959f549ab11c9874fe6 | [
"MIT"
] | 2 | 2020-11-10T01:45:28.000Z | 2021-05-30T03:32:26.000Z | utils/others.py | MSwML/ML-guided-material-synthesis | 8c0ae4a4f6403bcc6833e959f549ab11c9874fe6 | [
"MIT"
] | null | null | null | utils/others.py | MSwML/ML-guided-material-synthesis | 8c0ae4a4f6403bcc6833e959f549ab11c9874fe6 | [
"MIT"
] | 1 | 2021-01-23T04:34:50.000Z | 2021-01-23T04:34:50.000Z | import shap
import numpy as np
import pandas as pd
from utils import data_handler
def extract_feature_importance(model,X,title):
print('Feature importance...')
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values, feature_names=X.columns, plot_type="bar")
# normalize importance values
sum_col = abs(shap_values).sum(axis=0)
imp = np.array(sum_col/sum_col.sum())
ind = np.argsort(imp)[::-1]
sorted_imp = imp[ind]
sorted_feature = X.columns[ind]
feature_imp_sorted = pd.DataFrame( [sorted_imp],columns=sorted_feature)
print(feature_imp_sorted)
data_handler.save_csv(feature_imp_sorted,title=title+'feature_imp_sorted')
def predict_fake_input(model, task, title):
generated = data_handler.load_fake_input(task)
print('Number of generated conditions : ',generated.shape)
if(task==0):
pred = model.predict_proba(generated)
final_state = pd.Series( pred[:,1], name='Pred_Result')
elif(task==1):
pred = model.predict(generated)
final_state = pd.Series( pred, name='Pred_Result')
result = pd.concat([generated, final_state], axis=1)
data_handler.save_csv(result,title+'pred_fake_input') | 28.644444 | 78 | 0.694337 |
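# Minimal usage sketch (the model, data and data_handler paths are project-specific assumptions):
#
#   from xgboost import XGBClassifier
#   model = XGBClassifier().fit(X_train, y_train)
#   extract_feature_importance(model, X_train, title='run1_')   # SHAP summary plot + CSV
#   predict_fake_input(model, task=0, title='run1_')             # score generated conditions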
79427e37762155d108c16efd1ee009b67587edcb | 9,814 | py | Python | test/color/test_rgb.py | connorlee77/kornia | af5b1f76bedf2a7fc0e0da2386b1be3032b6534f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-10-21T05:13:51.000Z | 2021-10-21T05:13:51.000Z | test/color/test_rgb.py | connorlee77/kornia | af5b1f76bedf2a7fc0e0da2386b1be3032b6534f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-12T01:08:10.000Z | 2022-03-12T01:08:10.000Z | test/color/test_rgb.py | connorlee77/kornia | af5b1f76bedf2a7fc0e0da2386b1be3032b6534f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-02-09T02:19:32.000Z | 2021-02-09T02:19:32.000Z | import kornia
import kornia.testing as utils # test utils
from test.common import device
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import pytest
class TestRgbToRgba:
def test_smoke(self, device):
data = torch.rand(3, 4, 4).to(device)
assert kornia.rgb_to_rgba(data, 0.).shape == (4, 4, 4)
def test_back_and_forth_rgb(self, device):
a_val: float = 1.
x_rgb = torch.rand(3, 4, 4).to(device)
x_rgba = kornia.rgb_to_rgba(x_rgb, a_val)
x_rgb_new = kornia.rgba_to_rgb(x_rgba)
assert_allclose(x_rgb, x_rgb_new)
def test_back_and_forth_bgr(self, device):
a_val: float = 1.
x_bgr = torch.rand(3, 4, 4).to(device)
x_rgba = kornia.bgr_to_rgba(x_bgr, a_val)
x_bgr_new = kornia.rgba_to_bgr(x_rgba)
assert_allclose(x_bgr, x_bgr_new)
def test_bgr(self, device):
a_val: float = 1.
x_rgb = torch.rand(3, 4, 4).to(device)
x_bgr = kornia.rgb_to_bgr(x_rgb)
x_rgba = kornia.rgb_to_rgba(x_rgb, a_val)
x_rgba_new = kornia.bgr_to_rgba(x_bgr, a_val)
assert_allclose(x_rgba, x_rgba_new)
def test_single(self, device):
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
data = data.to(device)
aval: float = 0.4
expected = torch.tensor([[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[0.4, 0.4],
[0.4, 0.4]]]) # 4x2x2
expected = expected.to(device)
assert_allclose(kornia.rgb_to_rgba(data, aval), expected)
def test_batch(self, device):
data = torch.tensor([[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]],
[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]]) # 2x3x2x2
data = data.to(device)
aval: float = 45.
expected = torch.tensor([[[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[45., 45.],
[45., 45.]]],
[[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 2.0],
[2.0, 2.0]],
[[3.0, 3.0],
[3.0, 3.0]],
[[45., 45.],
[45., 45.]]]])
expected = expected.to(device)
assert_allclose(kornia.rgb_to_rgba(data, aval), expected)
def test_gradcheck(self, device):
data = torch.rand(1, 3, 2, 2).to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.RgbToRgba(1.), (data,), raise_exception=True)
class TestBgrToRgb:
def test_back_and_forth(self, device):
data_bgr = torch.rand(1, 3, 3, 2).to(device)
data_rgb = kornia.bgr_to_rgb(data_bgr)
data_bgr_new = kornia.rgb_to_bgr(data_rgb)
assert_allclose(data_bgr, data_bgr_new)
def test_bgr_to_rgb(self, device):
data = torch.tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
expected = torch.tensor([[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]]) # 3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.BgrToRgb()
assert_allclose(f(data), expected)
def test_batch_bgr_to_rgb(self, device):
data = torch.tensor([[[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]],
[[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]]) # 2x3x2x2
expected = torch.tensor([[[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]],
[[[3., 3.], [3., 3.]],
[[2., 2.], [2., 2.]],
[[1., 1.], [1., 1.]]]]) # 2x3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.BgrToRgb()
out = f(data)
assert_allclose(out, expected)
def test_gradcheck(self, device):
data = torch.tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
data = data.to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.BgrToRgb(), (data,), raise_exception=True)
@pytest.mark.skip(reason="turn off all jit for a while")
def test_jit(self, device):
@torch.jit.script
def op_script(data: torch.Tensor) -> torch.Tensor:
return kornia.bgr_to_rgb(data)
data = torch.Tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x2
actual = op_script(data)
expected = kornia.bgr_to_rgb(data)
assert_allclose(actual, expected)
class TestRgbToBgr:
def test_back_and_forth(self, device):
data_rgb = torch.rand(1, 3, 3, 2).to(device)
data_bgr = kornia.rgb_to_bgr(data_rgb)
data_rgb_new = kornia.bgr_to_rgb(data_bgr)
assert_allclose(data_rgb, data_rgb_new)
def test_rgb_to_bgr(self, device):
# prepare input data
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
expected = torch.tensor([[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]]) # 3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.RgbToBgr()
assert_allclose(f(data), expected)
def test_gradcheck(self, device):
# prepare input data
data = torch.tensor([[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]) # 3x2x2
data = data.to(device)
data = utils.tensor_to_gradcheck_var(data) # to var
assert gradcheck(kornia.color.RgbToBgr(), (data,),
raise_exception=True)
@pytest.mark.skip(reason="turn off all jit for a while")
def test_jit(self):
@torch.jit.script
def op_script(data: torch.Tensor) -> torch.Tensor:
return kornia.rgb_to_bgr(data)
data = torch.Tensor([[[1., 1.], [1., 1.]],
[[2., 2.], [2., 2.]],
[[3., 3.], [3., 3.]]]) # 3x2x
actual = op_script(data)
expected = kornia.rgb_to_bgr(data)
assert_allclose(actual, expected)
def test_batch_rgb_to_bgr(self, device):
# prepare input data
data = torch.tensor([[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]],
[[[1., 1.],
[1., 1.]],
[[2., 2.],
[2., 2.]],
[[3., 3.],
[3., 3.]]]]) # 2x3x2x2
expected = torch.tensor([[[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]],
[[[3., 3.],
[3., 3.]],
[[2., 2.],
[2., 2.]],
[[1., 1.],
[1., 1.]]]]) # 2x3x2x2
# move data to the device
data = data.to(device)
expected = expected.to(device)
f = kornia.color.RgbToBgr()
out = f(data)
assert_allclose(out, expected)
| 31.863636 | 83 | 0.3602 |
79427fb72b17238219878925957b3b6e0e4ff7bb | 4,160 | py | Python | tests/unitary/GaugeController/test_gauges_weights.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 217 | 2020-06-24T14:01:21.000Z | 2022-03-29T08:35:24.000Z | tests/unitary/GaugeController/test_gauges_weights.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 25 | 2020-06-24T09:39:02.000Z | 2022-03-22T17:03:00.000Z | tests/unitary/GaugeController/test_gauges_weights.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 110 | 2020-07-10T22:45:49.000Z | 2022-03-29T02:51:08.000Z | import brownie
WEEK = 7 * 86400
YEAR = 365 * 86400
TYPE_WEIGHTS = [5 * 10 ** 17, 2 * 10 ** 18]
GAUGE_WEIGHTS = [2 * 10 ** 18, 10 ** 18, 5 * 10 ** 17]
def test_add_gauges(accounts, gauge_controller, three_gauges):
gauge_controller.add_gauge(three_gauges[0], 0, {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[1], 0, {"from": accounts[0]})
assert gauge_controller.gauges(0) == three_gauges[0]
assert gauge_controller.gauges(1) == three_gauges[1]
def test_n_gauges(accounts, gauge_controller, three_gauges):
assert gauge_controller.n_gauges() == 0
gauge_controller.add_gauge(three_gauges[0], 0, {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[1], 0, {"from": accounts[0]})
assert gauge_controller.n_gauges() == 2
def test_n_gauges_same_gauge(accounts, gauge_controller, three_gauges):
assert gauge_controller.n_gauges() == 0
gauge_controller.add_gauge(three_gauges[0], 0, {"from": accounts[0]})
with brownie.reverts("dev: cannot add the same gauge twice"):
gauge_controller.add_gauge(three_gauges[0], 0, {"from": accounts[0]})
assert gauge_controller.n_gauges() == 1
def test_n_gauge_types(gauge_controller, accounts, three_gauges):
assert gauge_controller.n_gauge_types() == 1
gauge_controller.add_type(b"Insurance", {"from": accounts[0]})
assert gauge_controller.n_gauge_types() == 2
def test_gauge_types(accounts, gauge_controller, three_gauges):
gauge_controller.add_type(b"Insurance", {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[0], 1, {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[1], 0, {"from": accounts[0]})
assert gauge_controller.gauge_types(three_gauges[0]) == 1
assert gauge_controller.gauge_types(three_gauges[1]) == 0
def test_gauge_weight(accounts, gauge_controller, gauge):
gauge_controller.add_gauge(gauge, 0, 10 ** 19, {"from": accounts[0]})
assert gauge_controller.get_gauge_weight.call(gauge) == 10 ** 19
def test_gauge_weight_as_zero(accounts, gauge_controller, gauge):
gauge_controller.add_gauge(gauge, 0, {"from": accounts[0]})
assert gauge_controller.get_gauge_weight.call(gauge) == 0
def test_set_gauge_weight(chain, accounts, gauge_controller, gauge):
gauge_controller.add_gauge(gauge, 0, {"from": accounts[0]})
gauge_controller.change_gauge_weight(gauge, 10 ** 21)
chain.sleep(WEEK)
assert gauge_controller.get_gauge_weight(gauge) == 10 ** 21
def test_type_weight(accounts, gauge_controller):
gauge_controller.add_type(b"Insurance", {"from": accounts[0]})
assert gauge_controller.get_type_weight(0) == TYPE_WEIGHTS[0]
assert gauge_controller.get_type_weight(1) == 0
def test_change_type_weight(accounts, gauge_controller):
gauge_controller.add_type(b"Insurance", {"from": accounts[0]})
gauge_controller.change_type_weight(1, TYPE_WEIGHTS[1], {"from": accounts[0]})
gauge_controller.change_type_weight(0, 31337, {"from": accounts[0]})
assert gauge_controller.get_type_weight(0) == 31337
assert gauge_controller.get_type_weight(1) == TYPE_WEIGHTS[1]
def test_relative_weight_write(accounts, chain, gauge_controller, three_gauges, skip_coverage):
gauge_controller.add_type(b"Insurance", TYPE_WEIGHTS[1], {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[0], 0, GAUGE_WEIGHTS[0], {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[1], 0, GAUGE_WEIGHTS[1], {"from": accounts[0]})
gauge_controller.add_gauge(three_gauges[2], 1, GAUGE_WEIGHTS[2], {"from": accounts[0]})
total_weight = (
TYPE_WEIGHTS[0] * GAUGE_WEIGHTS[0]
+ TYPE_WEIGHTS[0] * GAUGE_WEIGHTS[1]
+ TYPE_WEIGHTS[1] * GAUGE_WEIGHTS[2]
)
chain.sleep(int(1.1 * YEAR))
# Fill weights and check that nothing has changed
t = chain.time()
for gauge, w, gauge_type in zip(three_gauges, GAUGE_WEIGHTS, [0, 0, 1]):
gauge_controller.gauge_relative_weight_write(gauge, t)
relative_weight = gauge_controller.gauge_relative_weight(gauge, t)
assert relative_weight == 10 ** 18 * w * TYPE_WEIGHTS[gauge_type] / total_weight
| 37.477477 | 95 | 0.722837 |
79427fbda58bff26e958303467032bf18314f192 | 1,335 | py | Python | test/test_v1_volume_mount.py | pearsontechnology/k8sv1 | f9df106a4f2492d35af63b6bb2b1da4ed2b84579 | ["Apache-2.0"] | 1 | 2016-08-10T15:06:53.000Z | 2016-08-10T15:06:53.000Z | test/test_v1_volume_mount.py | pearsontechnology/k8sv1 | f9df106a4f2492d35af63b6bb2b1da4ed2b84579 | ["Apache-2.0"] | null | null | null | test/test_v1_volume_mount.py | pearsontechnology/k8sv1 | f9df106a4f2492d35af63b6bb2b1da4ed2b84579 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import k8sv1
from k8sv1.rest import ApiException
from k8sv1.models.v1_volume_mount import V1VolumeMount
class TestV1VolumeMount(unittest.TestCase):
""" V1VolumeMount unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1VolumeMount(self):
"""
Test V1VolumeMount
"""
model = k8sv1.models.v1_volume_mount.V1VolumeMount()
if __name__ == '__main__':
unittest.main()
| 25.188679 | 104 | 0.713109 |
79427fc696a65dc7442e39342261a3f5df774cae | 672 | py | Python | bindings/python/button-example.py | ali-hdvr/libsurvive | 576fdbac0f9f18f7c90bb65503dbd8508a52af00 | ["MIT"] | 377 | 2016-12-03T06:44:50.000Z | 2020-02-09T21:48:46.000Z | bindings/python/button-example.py | ali-hdvr/libsurvive | 576fdbac0f9f18f7c90bb65503dbd8508a52af00 | ["MIT"] | 87 | 2016-12-05T04:07:18.000Z | 2020-02-04T15:10:16.000Z | bindings/python/button-example.py | ali-hdvr/libsurvive | 576fdbac0f9f18f7c90bb65503dbd8508a52af00 | ["MIT"] | 62 | 2016-12-03T06:38:02.000Z | 2020-02-04T19:21:14.000Z |
import sys
import pysurvive
ctx = pysurvive.init(sys.argv)
if ctx is None: # implies -help or similiar
exit(-1)
def button_func(obj, eventtype, buttonid, axisids, axisvals):
if eventtype == pysurvive.SURVIVE_INPUT_EVENT_BUTTON_DOWN:
eventstring = "DOWN"
elif eventtype == pysurvive.SURVIVE_INPUT_EVENT_BUTTON_UP:
eventstring = "UP"
else:
eventstring = "%d" % (eventtype)
print("Button %d on %s generated event %s"%(buttonid, obj.contents.codename.decode('utf8'), eventstring))
keepRunning = True
pysurvive.install_button_fn(ctx, button_func)
while keepRunning and pysurvive.poll(ctx) == 0:
pass
pysurvive.close(ctx)
| 24.888889 | 109 | 0.71131 |
794281432fe4d2d4aaee82c41c08b488a6933e16 | 15,849 | py | Python | projectq/libs/math/_gates.py | ionq/ProjectQ | 0cf7322cde910f79c6d4515fed36beaad2ae2f40 | ["Apache-2.0"] | 4 | 2021-07-09T04:14:36.000Z | 2022-01-31T01:39:56.000Z | projectq/libs/math/_gates.py | ionq/ProjectQ | 0cf7322cde910f79c6d4515fed36beaad2ae2f40 | ["Apache-2.0"] | 14 | 2021-06-21T12:19:09.000Z | 2022-03-17T16:06:06.000Z | projectq/libs/math/_gates.py | ionq/ProjectQ | 0cf7322cde910f79c6d4515fed36beaad2ae2f40 | ["Apache-2.0"] | 1 | 2021-07-09T15:04:49.000Z | 2021-07-09T15:04:49.000Z |
# -*- coding: utf-8 -*-
# Copyright 2020 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Math gates for ProjectQ"""
from projectq.ops import BasicMathGate
class AddConstant(BasicMathGate):
"""
Add a constant to a quantum number represented by a quantum register,
stored from low- to high-bit.
Example:
.. code-block:: python
qunum = eng.allocate_qureg(5) # 5-qubit number
X | qunum[1] # qunum is now equal to 2
AddConstant(3) | qunum # qunum is now equal to 5
Important: if you run with conditional and carry, carry needs to
be a quantum register for the compiler/decomposition to work.
"""
def __init__(self, a):
"""
Initializes the gate to the number to add.
Args:
a (int): Number to add to a quantum register.
It also initializes its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
BasicMathGate.__init__(self, lambda x: ((x + a),))
self.a = a
def get_inverse(self):
"""
Return the inverse gate (subtraction of the same constant).
"""
return SubConstant(self.a)
def __str__(self):
return "AddConstant({})".format(self.a)
def __eq__(self, other):
return isinstance(other, AddConstant) and self.a == other.a
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def SubConstant(a):
"""
Subtract a constant from a quantum number represented by a quantum
register, stored from low- to high-bit.
Args:
a (int): Constant to subtract
Example:
.. code-block:: python
qunum = eng.allocate_qureg(5) # 5-qubit number
X | qunum[2] # qunum is now equal to 4
SubConstant(3) | qunum # qunum is now equal to 1
"""
return AddConstant(-a)
class AddConstantModN(BasicMathGate):
"""
Add a constant to a quantum number represented by a quantum register
modulo N.
The number is stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum = eng.allocate_qureg(5) # 5-qubit number
X | qunum[1] # qunum is now equal to 2
AddConstantModN(3, 4) | qunum # qunum is now equal to 1
.. note::
Pre-conditions:
        * a < N
        * a >= 0
* The value stored in the quantum register must be lower than N
"""
def __init__(self, a, N):
"""
Initializes the gate to the number to add modulo N.
Args:
a (int): Number to add to a quantum register (0 <= a < N).
N (int): Number modulo which the addition is carried out.
It also initializes its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
BasicMathGate.__init__(self, lambda x: ((x + a) % N,))
self.a = a
self.N = N
def __str__(self):
return "AddConstantModN({}, {})".format(self.a, self.N)
def get_inverse(self):
"""
Return the inverse gate (subtraction of the same number a modulo the
same number N).
"""
return SubConstantModN(self.a, self.N)
def __eq__(self, other):
return isinstance(other, AddConstantModN) and self.a == other.a and self.N == other.N
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def SubConstantModN(a, N):
"""
Subtract a constant from a quantum number represented by a quantum
register modulo N.
The number is stored from low- to high-bit, i.e., qunum[0] is the LSB.
Args:
a (int): Constant to add
N (int): Constant modulo which the addition of a should be carried
out.
Example:
.. code-block:: python
qunum = eng.allocate_qureg(3) # 3-qubit number
X | qunum[1] # qunum is now equal to 2
            SubConstantModN(4,5) | qunum # qunum is now -2 = 3 (mod 5)
.. note::
Pre-conditions:
        * a < N
        * a >= 0
* The value stored in the quantum register must be lower than N
"""
return AddConstantModN(N - a, N)
class MultiplyByConstantModN(BasicMathGate):
"""
Multiply a quantum number represented by a quantum register by a constant
modulo N.
The number is stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum = eng.allocate_qureg(5) # 5-qubit number
X | qunum[2] # qunum is now equal to 4
MultiplyByConstantModN(3,5) | qunum # qunum is now 2.
.. note::
Pre-conditions:
        * a < N
        * a >= 0
        * gcd(a, N) == 1
* The value stored in the quantum register must be lower than N
"""
def __init__(self, a, N):
"""
Initializes the gate to the number to multiply with modulo N.
Args:
a (int): Number by which to multiply a quantum register
(0 <= a < N).
N (int): Number modulo which the multiplication is carried out.
It also initializes its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
BasicMathGate.__init__(self, lambda x: ((a * x) % N,))
self.a = a
self.N = N
def __str__(self):
return "MultiplyByConstantModN({}, {})".format(self.a, self.N)
def __eq__(self, other):
return isinstance(other, MultiplyByConstantModN) and self.a == other.a and self.N == other.N
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
class AddQuantumGate(BasicMathGate):
"""
Adds up two quantum numbers represented by quantum registers.
The numbers are stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum_a = eng.allocate_qureg(5) # 5-qubit number
qunum_b = eng.allocate_qureg(5) # 5-qubit number
carry_bit = eng.allocate_qubit()
X | qunum_a[2] #qunum_a is now equal to 4
X | qunum_b[3] #qunum_b is now equal to 8
            AddQuantum | (qunum_a, qunum_b, carry_bit)
# qunum_a remains 4, qunum_b is now 12 and carry_bit is 0
"""
def __init__(self):
BasicMathGate.__init__(self, None)
def __str__(self):
return "AddQuantum"
def __eq__(self, other):
return isinstance(other, AddQuantumGate)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def get_math_function(self, qubits):
n = len(qubits[0])
def math_fun(a):
a[1] = a[0] + a[1]
if len(bin(a[1])[2:]) > n:
a[1] = a[1] % (2 ** n)
if len(a) == 3:
# Flip the last bit of the carry register
a[2] ^= 1
return a
return math_fun
def get_inverse(self):
"""
Return the inverse gate (subtraction of the same number a modulo the
same number N).
"""
return _InverseAddQuantumGate()
AddQuantum = AddQuantumGate()
class _InverseAddQuantumGate(BasicMathGate):
"""
    Internal gate class to support emulation for inverse
    addition.
"""
def __init__(self):
BasicMathGate.__init__(self, None)
def __str__(self):
return "_InverseAddQuantum"
def get_math_function(self, qubits):
def math_fun(a):
if len(a) == 3:
# Flip the last bit of the carry register
a[2] ^= 1
a[1] -= a[0]
return a
return math_fun
class SubtractQuantumGate(BasicMathGate):
"""
Subtract one quantum number represented by a quantum register from
another quantum number represented by a quantum register.
Example:
.. code-block:: python
qunum_a = eng.allocate_qureg(5) # 5-qubit number
qunum_b = eng.allocate_qureg(5) # 5-qubit number
X | qunum_a[2] #qunum_a is now equal to 4
X | qunum_b[3] #qunum_b is now equal to 8
SubtractQuantum | (qunum_a, qunum_b)
# qunum_a remains 4, qunum_b is now 4
"""
def __init__(self):
"""
Initializes the gate to its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
def subtract(a, b):
return (a, b - a)
BasicMathGate.__init__(self, subtract)
def __str__(self):
return "SubtractQuantum"
def __eq__(self, other):
return isinstance(other, SubtractQuantumGate)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def get_inverse(self):
"""
Return the inverse gate (subtraction of the same number a modulo the
same number N).
"""
return AddQuantum
SubtractQuantum = SubtractQuantumGate()
class ComparatorQuantumGate(BasicMathGate):
"""
    Flips a compare qubit if the binary value of the first input is higher than
    the second input.
The numbers are stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum_a = eng.allocate_qureg(5) # 5-qubit number
qunum_b = eng.allocate_qureg(5) # 5-qubit number
compare_bit = eng.allocate_qubit()
X | qunum_a[4] #qunum_a is now equal to 16
X | qunum_b[3] #qunum_b is now equal to 8
ComparatorQuantum | (qunum_a, qunum_b, compare_bit)
            # qunum_a and qunum_b remain 16 and 8; compare_bit is now 1
"""
def __init__(self):
"""
Initializes the gate and its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
def compare(a, b, c):
if b < a:
if c == 0:
c = 1
else:
c = 0
return (a, b, c)
BasicMathGate.__init__(self, compare)
def __str__(self):
return "Comparator"
def __eq__(self, other):
return isinstance(other, ComparatorQuantumGate)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def get_inverse(self):
"""
Return the inverse gate
"""
return AddQuantum
ComparatorQuantum = ComparatorQuantumGate()
class DivideQuantumGate(BasicMathGate):
"""
    Divides one quantum number by another. Takes three inputs which should
be quantum registers of equal size; a dividend, a remainder and a
divisor. The remainder should be in the state |0...0> and the dividend
should be bigger than the divisor.The gate returns (in this order): the
remainder, the quotient and the divisor.
The numbers are stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum_a = eng.allocate_qureg(5) # 5-qubit number
qunum_b = eng.allocate_qureg(5) # 5-qubit number
qunum_c = eng.allocate_qureg(5) # 5-qubit number
All(X) | [qunum_a[0],qunum_a[3]] #qunum_a is now equal to 9
X | qunum_c[2] #qunum_c is now equal to 4
DivideQuantum | (qunum_a, qunum_b,qunum_c)
# qunum_a is now equal to 1 (remainder), qunum_b is now
# equal to 2 (quotient) and qunum_c remains 4 (divisor)
|dividend>|remainder>|divisor> -> |remainder>|quotient>|divisor>
"""
def __init__(self):
"""
Initializes the gate and its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
def division(dividend, remainder, divisor):
if divisor == 0 or divisor > dividend:
return (remainder, dividend, divisor)
else:
quotient = remainder + dividend // divisor
return ((dividend - (quotient * divisor)), quotient, divisor)
BasicMathGate.__init__(self, division)
def get_inverse(self):
return _InverseDivideQuantumGate()
def __str__(self):
return "DivideQuantum"
def __eq__(self, other):
return isinstance(other, DivideQuantumGate)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
DivideQuantum = DivideQuantumGate()
class _InverseDivideQuantumGate(BasicMathGate):
"""
    Internal gate class to support emulation for inverse
    division.
"""
def __init__(self):
def inverse_division(remainder, quotient, divisor):
if divisor == 0:
return (quotient, remainder, divisor)
dividend = remainder + quotient * divisor
remainder = 0
return (dividend, remainder, divisor)
BasicMathGate.__init__(self, inverse_division)
def __str__(self):
return "_InverseDivideQuantum"
class MultiplyQuantumGate(BasicMathGate):
"""
Multiplies two quantum numbers represented by a quantum registers.
Requires three quantum registers as inputs, the first two are the
numbers to be multiplied and should have the same size (n qubits). The
third register will hold the product and should be of size 2n+1.
The numbers are stored from low- to high-bit, i.e., qunum[0] is the LSB.
Example:
.. code-block:: python
qunum_a = eng.allocate_qureg(4)
qunum_b = eng.allocate_qureg(4)
qunum_c = eng.allocate_qureg(9)
X | qunum_a[2] # qunum_a is now 4
X | qunum_b[3] # qunum_b is now 8
MultiplyQuantum() | (qunum_a, qunum_b, qunum_c)
# qunum_a remains 4 and qunum_b remains 8, qunum_c is now equal to 32
"""
def __init__(self):
"""
Initializes the gate and its base class, BasicMathGate, with the
corresponding function, so it can be emulated efficiently.
"""
def multiply(a, b, c):
return (a, b, c + a * b)
BasicMathGate.__init__(self, multiply)
def __str__(self):
return "MultiplyQuantum"
def __eq__(self, other):
return isinstance(other, MultiplyQuantumGate)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self.__eq__(other)
def get_inverse(self):
return _InverseMultiplyQuantumGate()
MultiplyQuantum = MultiplyQuantumGate()
class _InverseMultiplyQuantumGate(BasicMathGate):
"""
    Internal gate class to support emulation for inverse
    multiplication.
"""
def __init__(self):
def inverse_multiplication(a, b, c):
return (a, b, c - a * b)
BasicMathGate.__init__(self, inverse_multiplication)
def __str__(self):
return "_InverseMultiplyQuantum"
| 28.051327 | 100 | 0.604202 |
794281c62f53bff2c4a1c95ad1e4d4b5326c55c2 | 3,323 | py | Python | retools/lock.py | dolead/retools | 28847168d768e3ff8f6e296627175085a205790c | ["MIT"] | null | null | null | retools/lock.py | dolead/retools | 28847168d768e3ff8f6e296627175085a205790c | ["MIT"] | null | null | null | retools/lock.py | dolead/retools | 28847168d768e3ff8f6e296627175085a205790c | ["MIT"] | null | null | null |
"""A Redis backed distributed global lock
This lock is based mostly on this excellent example:
http://chris-lamb.co.uk/2010/06/07/distributing-locking-python-and-redis/
This code adds one change, as suggested by the Redis documentation regarding
using locks in Redis, which is to only delete the Redis lock if we actually
completed within the timeout period. If we took too long to execute, then the
lock stored here is actually from a *different* client holding a lock and
we shouldn't be deleting their lock.
"""
# Copyright 2010,2011 Chris Lamb <[email protected]>
import time
import random
from retools import global_connection
class Lock(object):
def __init__(self, key, expires=60, timeout=10, redis=None):
"""
Distributed locking using Redis SETNX and GETSET.
Usage::
with Lock('my_lock'):
print "Critical section"
:param expires: We consider any existing lock older than
``expires`` seconds to be invalid in order to
detect crashed clients. This value must be higher
than it takes the critical section to execute.
:param timeout: If another client has already obtained the lock,
sleep for a maximum of ``timeout`` seconds before
giving up. A value of 0 means we never wait.
:param redis: The redis instance to use if the default global
redis connection is not desired.
"""
self.key = key
self.timeout = timeout
self.expires = expires
if not redis:
redis = global_connection.redis
self.redis = redis
self.start_time = time.time()
def __enter__(self):
redis = self.redis
timeout = self.timeout
retry_sleep = 0.005
if self.expires is None:
return
while timeout >= 0:
expires = time.time() + self.expires + 1
if redis.setnx(self.key, expires):
# We gained the lock; enter critical section
self.start_time = time.time()
redis.expire(self.key, int(self.expires))
return
current_value = redis.get(self.key)
# We found an expired lock and nobody raced us to replacing it
if current_value and float(current_value) < time.time() and \
redis.getset(self.key, expires) == current_value:
self.start_time = time.time()
redis.expire(self.key, int(self.expires))
return
timeout -= 1
if timeout >= 0:
time.sleep(random.uniform(0, retry_sleep))
retry_sleep = min(retry_sleep * 2, 1)
raise LockTimeout("Timeout while waiting for lock")
def __exit__(self, exc_type, exc_value, traceback):
# Only delete the key if we completed within the lock expiration,
# otherwise, another lock might've been established
if self.expires is None:
return
if time.time() - self.start_time < self.expires:
self.redis.delete(self.key)
class LockTimeout(BaseException):
"""Raised in the event a timeout occurs while waiting for a lock"""
| 36.119565 | 77 | 0.604574 |
794281cafa4f98f84b36cb43101f03491c1d0352 | 1,272 | py | Python | tests/api_resources/test_application_fee_refund.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | ["MIT", "BSD-3-Clause"] | 8 | 2021-05-29T08:57:58.000Z | 2022-02-19T07:09:25.000Z | tests/api_resources/test_application_fee_refund.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | ["MIT", "BSD-3-Clause"] | 5 | 2021-05-31T10:18:36.000Z | 2022-01-25T11:39:03.000Z | tests/api_resources/test_application_fee_refund.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | ["MIT", "BSD-3-Clause"] | 1 | 2021-05-29T13:27:10.000Z | 2021-05-29T13:27:10.000Z |
from __future__ import absolute_import, division, print_function
import pytest
import stripe
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "fr_123"
TEST_APPFEE_ID = "fee_123"
class TestApplicationFeeRefund(object):
async def test_is_saveable(self, request_mock):
appfee = await stripe.ApplicationFee.retrieve(TEST_APPFEE_ID)
resource = await appfee.refunds.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
await resource.save()
request_mock.assert_requested(
"post",
"/v1/application_fees/%s/refunds/%s"
% (TEST_APPFEE_ID, TEST_RESOURCE_ID),
)
async def test_is_modifiable(self, request_mock):
resource = await stripe.ApplicationFeeRefund.modify(
TEST_APPFEE_ID, TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post",
"/v1/application_fees/%s/refunds/%s"
% (TEST_APPFEE_ID, TEST_RESOURCE_ID),
)
assert isinstance(resource, stripe.ApplicationFeeRefund)
async def test_is_not_retrievable(self):
with pytest.raises(NotImplementedError):
await stripe.ApplicationFeeRefund.retrieve(TEST_RESOURCE_ID)
| 30.285714 | 72 | 0.68239 |
794281f7a10014ae0e3b2dd30805e3c8c303b6d3 | 163 | py | Python | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_BestCycle_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | null | null | null | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_BestCycle_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_BestCycle_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingAverage'] , ['BestCycle'] , ['SVR'] );
| 40.75 | 85 | 0.754601 |
79428338800187a14771fd7603482689d78daa6d | 307 | py | Python | example_app/set_sys_path.py | soofaloofa/gutter-appengine | 279073e18939e8070421210ca4d65f2f16d7ee5c | ["MIT"] | 1 | 2015-06-01T19:31:33.000Z | 2015-06-01T19:31:33.000Z | example_app/set_sys_path.py | soofaloofa/gutter-appengine | 279073e18939e8070421210ca4d65f2f16d7ee5c | ["MIT"] | null | null | null | example_app/set_sys_path.py | soofaloofa/gutter-appengine | 279073e18939e8070421210ca4d65f2f16d7ee5c | ["MIT"] | null | null | null |
import os
import sys
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
def set_sys_path():
# Add lib as primary libraries directory, with fallback to lib/dist
# and optionally to lib/dist.zip, loaded using zipimport.
sys.path[0:0] = [
os.path.join(CURRENT_PATH, 'lib'),
]
| 23.615385 | 71 | 0.687296 |
79428345c64dce00c672e545483a4f00f47ea31f | 2,159 | py | Python | front/models.py | llazzaro/django-front | 8a04a88d42b37f4882ab43415e5f20bedae9d257 | ["MIT"] | 135 | 2015-01-12T22:21:41.000Z | 2021-12-12T03:52:04.000Z | front/models.py | P-Designs/django-front | 2f7daaa70d6b2210f4a4ad0c251b0893f15bd711 | ["MIT"] | 12 | 2015-04-10T12:45:04.000Z | 2020-03-22T17:32:32.000Z | front/models.py | P-Designs/django-front | 2f7daaa70d6b2210f4a4ad0c251b0893f15bd711 | ["MIT"] | 24 | 2015-01-24T01:22:18.000Z | 2022-03-15T13:06:47.000Z |
from django.db import models
from django.core.cache import cache
from django.dispatch import receiver
from django.db.models.signals import post_save
import hashlib
import six
class Placeholder(models.Model):
key = models.CharField(max_length=40, primary_key=True, db_index=True)
value = models.TextField(blank=True)
def __unicode__(self):
return self.value
def cache_key(self):
return "front-edit-%s" % self.key
@classmethod
def key_for(cls, name, *bits):
return hashlib.new('sha1', six.text_type(name + ''.join([six.text_type(token) for token in bits])).encode('utf8')).hexdigest()
@classmethod
def copy_content(cls, name, source_bits, target_bits):
source_key = cls.key_for(name, *source_bits)
target_key = cls.key_for(name, *target_bits)
source = cls.objects.filter(key=source_key)
if source.exists():
source = source.get()
cls.objects.create(key=target_key, value=source.value)
class PlaceholderHistory(models.Model):
placeholder = models.ForeignKey(Placeholder, related_name='history', on_delete=models.CASCADE)
value = models.TextField(blank=True)
saved = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-saved', )
@property
def _as_json(self):
return {'value': self.value, 'saved': self.saved.strftime('%s')}
@receiver(post_save, sender=Placeholder)
def save_placeholder(sender, instance, created, raw, *args, **kwargs):
if not raw:
        # If we have placeholders, check whether the content has changed before saving history
if PlaceholderHistory.objects.filter(placeholder=instance).exists():
ph = PlaceholderHistory.objects.all()[0]
if ph.value != instance.value:
PlaceholderHistory.objects.create(placeholder=instance, value=instance.value)
else:
PlaceholderHistory.objects.create(placeholder=instance, value=instance.value)
@receiver(post_save, sender=PlaceholderHistory)
def save_history(sender, instance, created, raw, *args, **kwargs):
cache.delete(instance.placeholder.cache_key())
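# A hedged usage sketch of the key scheme above (illustrative identifiers;
# assumes Django is configured and this app's migrations have been applied):
def _example_copy_sidebar(source_page_id, target_page_id):
    # key_for() hashes the placeholder name together with any extra bits, so the
    # same ("sidebar", page_id) pair always maps to the same primary key.
    source_key = Placeholder.key_for("sidebar", source_page_id)
    Placeholder.objects.get_or_create(key=source_key, defaults={"value": "<p>Hello</p>"})
    # copy_content() duplicates the stored value from one bit tuple to another.
    Placeholder.copy_content("sidebar", [source_page_id], [target_page_id])
    return Placeholder.objects.get(key=Placeholder.key_for("sidebar", target_page_id))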
| 34.822581 | 134 | 0.696619 |
794283e09f59053c4936189314f3f0b0fe85e5d0 | 4,313 | py | Python | homeassistant/components/hue/__init__.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | ["Apache-2.0"] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/hue/__init__.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | ["Apache-2.0"] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/hue/__init__.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | ["Apache-2.0"] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z |
"""Support for the Philips Hue system."""
from aiohue.util import normalize_bridge_id
from homeassistant import config_entries, core
from homeassistant.components import persistent_notification
from homeassistant.helpers import device_registry as dr
from .bridge import HueBridge
from .const import DOMAIN, SERVICE_HUE_ACTIVATE_SCENE
from .migration import check_migration
from .services import async_register_services
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Set up a bridge from a config entry."""
# check (and run) migrations if needed
await check_migration(hass, entry)
# setup the bridge instance
bridge = HueBridge(hass, entry)
if not await bridge.async_initialize_bridge():
return False
# register Hue domain services
async_register_services(hass)
api = bridge.api
# For backwards compat
unique_id = normalize_bridge_id(api.config.bridge_id)
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
# For recovering from bug where we incorrectly assumed homekit ID = bridge ID
# Remove this logic after Home Assistant 2022.4
elif entry.unique_id != unique_id:
# Find entries with this unique ID
other_entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.unique_id == unique_id
),
None,
)
if other_entry is None:
# If no other entry, update unique ID of this entry ID.
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
elif other_entry.source == config_entries.SOURCE_IGNORE:
# There is another entry but it is ignored, delete that one and update this one
hass.async_create_task(
hass.config_entries.async_remove(other_entry.entry_id)
)
hass.config_entries.async_update_entry(entry, unique_id=unique_id)
else:
# There is another entry that already has the right unique ID. Delete this entry
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
# add bridge device to device registry
device_registry = dr.async_get(hass)
if bridge.api_version == 1:
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, api.config.mac_address)},
identifiers={(DOMAIN, api.config.bridge_id)},
manufacturer="Signify",
name=api.config.name,
model=api.config.model_id,
sw_version=api.config.software_version,
)
# create persistent notification if we found a bridge version with security vulnerability
if (
api.config.model_id == "BSB002"
and api.config.software_version < "1935144040"
):
persistent_notification.async_create(
hass,
"Your Hue hub has a known security vulnerability ([CVE-2020-6007] "
"(https://cve.circl.lu/cve/CVE-2020-6007)). "
"Go to the Hue app and check for software updates.",
"Signify Hue",
"hue_hub_firmware",
)
else:
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, api.config.mac_address)},
identifiers={
(DOMAIN, api.config.bridge_id),
(DOMAIN, api.config.bridge_device.id),
},
manufacturer=api.config.bridge_device.product_data.manufacturer_name,
name=api.config.name,
model=api.config.model_id,
sw_version=api.config.software_version,
)
return True
async def async_unload_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
unload_success = await hass.data[DOMAIN][entry.entry_id].async_reset()
if len(hass.data[DOMAIN]) == 0:
hass.data.pop(DOMAIN)
hass.services.async_remove(DOMAIN, SERVICE_HUE_ACTIVATE_SCENE)
return unload_success
| 37.504348 | 97 | 0.656156 |
7942854af2003faf969bef9948d51919aecb34db | 7,178 | py | Python | gitrevise/tui.py | krobelus/git-revise | 55bfb71bd4e9232b3b0befe171bc72d412856ae2 | ["MIT"] | null | null | null | gitrevise/tui.py | krobelus/git-revise | 55bfb71bd4e9232b3b0befe171bc72d412856ae2 | ["MIT"] | null | null | null | gitrevise/tui.py | krobelus/git-revise | 55bfb71bd4e9232b3b0befe171bc72d412856ae2 | ["MIT"] | null | null | null |
from typing import Optional, List
from argparse import ArgumentParser, Namespace
from subprocess import CalledProcessError
import sys
from .odb import Repository, Commit, Reference
from .utils import (
EditorError,
commit_range,
edit_commit_message,
update_head,
cut_commit,
local_commits,
)
from .todo import (
CyclicFixupError,
apply_todos,
build_todos,
edit_todos,
autosquash_todos,
)
from .merge import MergeConflict
from . import __version__
def build_parser() -> ArgumentParser:
parser = ArgumentParser(
description="""\
Rebase staged changes onto the given commit, and rewrite history to
incorporate these changes."""
)
parser.add_argument("target", nargs="?", help="target commit to apply fixups to")
parser.add_argument("--ref", default="HEAD", help="reference to update")
parser.add_argument(
"--reauthor",
action="store_true",
help="reset the author of the targeted commit",
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"--edit",
"-e",
action="store_true",
help="edit commit message of targeted commit(s)",
)
autosquash_group = parser.add_mutually_exclusive_group()
autosquash_group.add_argument(
"--autosquash",
action="store_true",
help="automatically apply fixup! and squash! commits to their targets",
)
autosquash_group.add_argument(
"--no-autosquash",
action="store_true",
help="force disable revise.autoSquash behaviour",
)
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument(
"--no-index",
action="store_true",
help="ignore the index while rewriting history",
)
index_group.add_argument(
"--all",
"-a",
action="store_true",
help="stage all tracked files before running",
)
index_group.add_argument(
"--patch",
"-p",
action="store_true",
help="interactively stage hunks before running",
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
"--interactive",
"-i",
action="store_true",
help="interactively edit commit stack",
)
mode_group.add_argument(
"--message",
"-m",
action="append",
help="specify commit message on command line",
)
mode_group.add_argument(
"--cut",
"-c",
action="store_true",
help="interactively cut a commit into two smaller commits",
)
return parser
def interactive(
args: Namespace, repo: Repository, staged: Optional[Commit], head: Reference[Commit]
):
assert head.target is not None
if args.target is None:
base, to_rebase = local_commits(repo, head.target)
else:
base = repo.get_commit(args.target)
to_rebase = commit_range(base, head.target)
# Build up an initial todos list, edit that todos list.
todos = original = build_todos(to_rebase, staged)
if enable_autosquash(args, repo):
todos = autosquash_todos(todos)
if args.interactive:
todos = edit_todos(repo, todos, msgedit=args.edit)
if todos != original:
# Perform the todo list actions.
new_head = apply_todos(base, todos, reauthor=args.reauthor)
# Update the value of HEAD to the new state.
update_head(head, new_head, None)
else:
print("(warning) no changes performed", file=sys.stderr)
def enable_autosquash(args: Namespace, repo: Repository) -> bool:
if args.autosquash:
return True
if args.no_autosquash:
return False
return repo.bool_config(
"revise.autoSquash",
default=repo.bool_config("rebase.autoSquash", default=False),
)
def noninteractive(
args: Namespace, repo: Repository, staged: Optional[Commit], head: Reference[Commit]
):
assert head.target is not None
if args.target is None:
raise ValueError("<target> is a required argument")
head = repo.get_commit_ref(args.ref)
if head.target is None:
raise ValueError("Invalid target reference")
current = replaced = repo.get_commit(args.target)
to_rebase = commit_range(current, head.target)
# Apply changes to the target commit.
final = head.target.tree()
if staged:
print(f"Applying staged changes to '{args.target}'")
current = current.update(tree=staged.rebase(current).tree())
final = staged.rebase(head.target).tree()
# Update the commit message on the target commit if requested.
if args.message:
message = b"\n".join(l.encode("utf-8") + b"\n" for l in args.message)
current = current.update(message=message)
# Prompt the user to edit the commit message if requested.
if args.edit:
current = edit_commit_message(current)
# Rewrite the author to match the current user if requested.
if args.reauthor:
current = current.update(author=repo.default_author)
# If the commit should be cut, prompt the user to perform the cut.
if args.cut:
current = cut_commit(current)
if current != replaced:
print(f"{current.oid.short()} {current.summary()}")
# Rebase commits atop the commit range.
for commit in to_rebase:
current = commit.rebase(current)
print(f"{current.oid.short()} {current.summary()}")
update_head(head, current, final)
else:
print("(warning) no changes performed", file=sys.stderr)
def inner_main(args: Namespace, repo: Repository):
# If '-a' or '-p' was specified, stage changes.
if args.all:
repo.git("add", "-u")
if args.patch:
repo.git("add", "-p")
# Create a commit with changes from the index
staged = None
if not args.no_index:
staged = repo.index.commit(message=b"<git index>")
if staged.tree() == staged.parent().tree():
staged = None # No changes, ignore the commit
# Determine the HEAD reference which we're going to update.
head = repo.get_commit_ref(args.ref)
if head.target is None:
raise ValueError("Head reference not found!")
# Either enter the interactive or non-interactive codepath.
if args.interactive or args.autosquash:
interactive(args, repo, staged, head)
else:
noninteractive(args, repo, staged, head)
def main(argv: Optional[List[str]] = None):
args = build_parser().parse_args(argv)
try:
with Repository() as repo:
inner_main(args, repo)
except CalledProcessError as err:
print(f"subprocess exited with non-zero status: {err.returncode}")
sys.exit(1)
except CyclicFixupError as err:
print(f"todo error: {err}")
sys.exit(1)
except EditorError as err:
print(f"editor error: {err}")
sys.exit(1)
except MergeConflict as err:
print(f"merge conflict: {err}")
sys.exit(1)
except ValueError as err:
print(f"invalid value: {err}")
sys.exit(1)
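# A small illustrative driver (assumes the current working directory is a git
# repository; the flags mirror the parser defined above):
if __name__ == "__main__":
    # Roughly equivalent to running `git revise --interactive --autosquash`.
    main(["--interactive", "--autosquash"])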
| 29.908333 | 88 | 0.642101 |
794285aa5e0a2f8571e77d0c0d8a68fff49f2cb1 | 613 | py | Python | stocal/examples/events.py | MrLiono21/stocal | 7f7110c5b6401e7332d5d35c843b6fedafd464c2 | ["MIT"] | null | null | null | stocal/examples/events.py | MrLiono21/stocal | 7f7110c5b6401e7332d5d35c843b6fedafd464c2 | ["MIT"] | null | null | null | stocal/examples/events.py | MrLiono21/stocal | 7f7110c5b6401e7332d5d35c843b6fedafd464c2 | ["MIT"] | null | null | null |
"""Event example
stocal.Event's can be added to a process definition just like
Reactions. Process.trajectory returns a TrajectorySampler that
can cope with deterministic transitions (e.g. FirstReactionMethod).
Sampler selection and usage is entirely transparent to the user.
"""
import stocal
process = stocal.Process([
stocal.MassAction(['A', 'A'], ['A2'], 0.01),
stocal.MassAction(['A2'], ['A', 'A'], 1.),
stocal.Event([], ['A'], 0., 1.)
])
if __name__ == '__main__':
traj = process.trajectory({}, tmax=100)
for _ in traj:
print(traj.time, traj.state['A'], traj.state['A2'])
| 27.863636 | 67 | 0.668842 |
7942869f91600f5ced12b5178a647eb9df42255f | 22,751 | py | Python | venv/Lib/site-packages/pandas/core/groupby/grouper.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/core/groupby/grouper.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/core/groupby/grouper.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null |
"""
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
from typing import Dict, Hashable, List, Optional, Tuple
import numpy as np
from pandas._typing import FrameOrSeries
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby import ops
from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
sort : bool, default to False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
_attributes: Tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super().__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj, validate: bool = True):
"""
Parameters
----------
obj : the subject object
validate : boolean, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, _, self.obj = get_grouper(
self.obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj: FrameOrSeries, sort: bool = False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
self._grouper = self.grouper
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(self.grouper, "name", None) == key and isinstance(
obj, ABCSeries
):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind="mergesort")
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
self.obj = obj
self.grouper = ax
return self.grouper
@property
def groups(self):
return self.grouper.groups
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
class Grouping:
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj Union[DataFrame, Series]:
name :
level :
observed : bool, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* codes : ndarray, group codes
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(
self,
index: Index,
grouper=None,
obj: Optional[FrameOrSeries] = None,
name=None,
level=None,
sort: bool = True,
observed: bool = False,
in_axis: bool = False,
):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.all_grouper = None
self.index = index
self.sort = sort
self.obj = obj
self.observed = observed
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError(f"Level {level} not in index")
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
(
self.grouper,
self._codes,
self._group_index,
) = index._get_grouper_for_level(self.grouper, level)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
elif isinstance(self.grouper, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
if self.name is None:
self.name = grouper.result_index.name
self.obj = self.grouper.obj
self.grouper = grouper._get_grouper()
else:
if self.grouper is None and self.name is not None and self.obj is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com.asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.grouper, self.all_grouper = recode_for_groupby(
self.grouper, self.sort, observed
)
categories = self.grouper.categories
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._codes = self.grouper.codes
if observed:
codes = algorithms.unique1d(self.grouper.codes)
codes = codes[codes != -1]
if sort or self.grouper.ordered:
codes = np.sort(codes)
else:
codes = np.arange(len(categories))
self._group_index = CategoricalIndex(
Categorical.from_codes(
codes=codes, categories=categories, ordered=self.grouper.ordered
),
name=self.name,
)
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(
self.grouper, (Series, Index, ExtensionArray, np.ndarray)
):
if getattr(self.grouper, "ndim", 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError(f"Grouper for '{t}' not 1-dimensional")
self.grouper = self.index.map(self.grouper)
if not (
hasattr(self.grouper, "__len__")
and len(self.grouper) == len(self.index)
):
grper = pprint_thing(self.grouper)
errmsg = (
"Grouper result violates len(labels) == "
f"len(data)\nresult: {grper}"
)
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
self.grouper = self.grouper.astype("datetime64[ns]")
elif is_timedelta64_dtype(self.grouper):
self.grouper = self.grouper.astype("timedelta64[ns]")
def __repr__(self) -> str:
return f"Grouping({self.name})"
def __iter__(self):
return iter(self.indices)
_codes: Optional[np.ndarray] = None
_group_index: Optional[Index] = None
@property
def ngroups(self) -> int:
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
return self.grouper.indices
values = ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def codes(self) -> np.ndarray:
if self._codes is None:
self._make_codes()
return self._codes
@cache_readonly
def result_index(self) -> Index:
if self.all_grouper is not None:
return recode_from_groupby(self.all_grouper, self.sort, self.group_index)
return self.group_index
@property
def group_index(self) -> Index:
if self._group_index is None:
self._make_codes()
assert self._group_index is not None
return self._group_index
def _make_codes(self) -> None:
if self._codes is None or self._group_index is None:
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
codes = self.grouper.codes_info
uniques = self.grouper.result_index
else:
codes, uniques = algorithms.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._codes = codes
self._group_index = uniques
@cache_readonly
def groups(self) -> Dict[Hashable, np.ndarray]:
return self.index.groupby(Categorical.from_codes(self.codes, self.group_index))
def get_grouper(
obj: FrameOrSeries,
key=None,
axis: int = 0,
level=None,
sort: bool = True,
observed: bool = False,
mutated: bool = False,
validate: bool = True,
) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]":
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values.
If validate, then check for key/level overlaps.
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError("No group keys passed!")
else:
raise ValueError("multiple levels only valid with MultiIndex")
if isinstance(level, str):
if obj._get_axis(axis).name != level:
raise ValueError(
f"level name {level} is not the name "
f"of the {obj._get_axis_name(axis)}"
)
elif level > 0 or level < -1:
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, [key.key], obj
# already have a BaseGrouper, just return it
elif isinstance(key, ops.BaseGrouper):
return key, [], obj
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
)
# is this an index replacement?
if (
not any_callable
and not any_arraylike
and not any_groupers
and match_axis_length
and level is None
):
if isinstance(obj, DataFrame):
all_in_columns_index = all(
g in obj.columns or g in obj.index.names for g in keys
)
else:
assert isinstance(obj, Series)
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings: List[Grouping] = []
exclusions: List[Hashable] = []
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
if not _is_label_like(key):
items = obj._data.items
try:
items.get_loc(key)
except (KeyError, TypeError):
# TypeError shows up here if we pass e.g. Int64Index
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr) -> bool:
if not hasattr(gpr, "name"):
return False
try:
return gpr is obj[gpr.name]
except (KeyError, IndexError):
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr, axis=axis):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
"must be same length"
)
# create the Grouping
# allow us to passing the actual Grouping as the gpr
ping = (
Grouping(
group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis,
)
if not isinstance(gpr, Grouping)
else gpr
)
groupings.append(ping)
if len(groupings) == 0 and len(obj):
raise ValueError("No group keys passed!")
elif len(groupings) == 0:
groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
# create the internals grouper
grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val) -> bool:
return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
def _convert_grouper(axis: Index, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError("Grouper and axis must be same length")
return grouper
else:
return grouper
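# A brief, self-contained sketch of the public ``Grouper`` behaviour documented
# above (illustrative data only; pandas is imported under its usual alias):
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame(
        {
            "date": pd.date_range("2021-01-01", periods=6, freq="30s"),
            "A": ["x", "y", "x", "y", "x", "y"],
            "value": range(6),
        }
    )
    # Plain column grouping -- syntactic sugar for df.groupby("A"):
    print(df.groupby(pd.Grouper(key="A"))["value"].sum())
    # Resample-style grouping of the 'date' column into 60-second bins:
    print(df.groupby(pd.Grouper(key="date", freq="60s"))["value"].sum())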
| 34.52352 | 100 | 0.563536 |
794286b61eaa817b6021909de4b856b5eccd4ee7 | 1,345 | py | Python | lib/main.py | twstrike/tor_guardsim | 743c01b06e28e87ca5ce9e7d7ca3ca9271781cd5 | [
"CC0-1.0"
] | null | null | null | lib/main.py | twstrike/tor_guardsim | 743c01b06e28e87ca5ce9e7d7ca3ca9271781cd5 | [
"CC0-1.0"
] | 1 | 2016-02-22T14:00:01.000Z | 2016-02-24T18:35:05.000Z | lib/main.py | twstrike/tor_guardsim | 743c01b06e28e87ca5ce9e7d7ca3ca9271781cd5 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python
from __future__ import print_function
from py3hax import *
import tornet
import simtime
import client
def trivialSimulation():
net = tornet.Network(100)
# Decorate the network.
# Uncomment one or two of these at a time, kthx!
#net = tornet.FascistNetwork(net)
#net = tornet.FlakyNetwork(net)
#net = tornet.EvilFilteringNetwork(net)
#net = tornet.SniperNetwork(net)
c = client.Client(net, client.ClientParams())
ok = 0
bad = 0
for period in xrange(30): # one hour each
for subperiod in xrange(30): # two minutes each
if (subperiod % 10) == 0:
# nodes left and arrived
net.do_churn()
# nodes went up and down
net.updateRunning()
for attempts in xrange(6): # 20 sec each
# actually have the client act.
if c.buildCircuit():
ok += 1
else:
bad += 1
# time passed
simtime.advanceTime(20)
# new consensus
c.updateGuardLists()
print("Successful client circuits (total): %d (%d)" % (ok, (ok + bad)))
print("Percentage of successful circuilts: %f%%"
% ((ok / float(ok + bad)) * 100.0))
if __name__ == '__main__':
trivialSimulation()
| 24.907407 | 75 | 0.559108 |
794286cc1e71f95c262abaf26a24c9e8389ee28a | 6,057 | py | Python | app/decorators/validator.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | null | null | null | app/decorators/validator.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | 4 | 2022-03-03T12:47:02.000Z | 2022-03-08T18:10:34.000Z | app/decorators/validator.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | 1 | 2022-03-17T14:21:30.000Z | 2022-03-17T14:21:30.000Z | from http import HTTPStatus
from re import match
from functools import wraps
from flask import request
import datetime
from app.errors.json_not_found import JSONNotFound
from app.services import get_data
from datetime import datetime, timedelta, timezone
def validator(
user_name: str = None,
date: str = None,
date_schedule: dict = None,
phone: str = None,
cpf: str = None,
zip_code: str = None,
email=None,
password=None,
birthdate: str = None,
interval_date: dict = None
):
'''
Decorator valida os campos do request pelo tipo de campo requerido.
Tipos:
-> Todos os formatos de data são `DD/MM/YYYY`
- `date_schedule`: recebe um objeto dois datetime com data e hora uma de inicio e fim. `date_schedule` Verifica se o formato datetime é valido e se o intervalo da data esta correto.
- `date`: Verifica se o formato da data é valido e se essa data ainda não passou.
- `birthdate`: Verifica se o formato da data é valido.
- `zip_code`: Verifica se o formato CEP é valido. O CEP aceita somente nesse formato `60000-000`.
- `cpf`: Verifica se o formato da CPF é valido. O CPF aceita somente números `12345678901` ou números separados por ponto `123.456.789.01`.
- `email`: Verifica se o formato da email é valido.
- `password`: Verifica se o formato do password é valido. O password aceita somente uma letra Maiuscula , uma minuscula, um número e um caracter especial.
- `phone`: Verifica se o formato do phone é valido. O phone aceita somente números. Lembrando que só são aceitos números de telefones fixos e móveis válidos no Brasil.
- `verify_two`: Verifica se a data atual esta entre este intervalo
Exceções:
`É lançada excesão personalida para cada validação`
'''
def received_function(function):
@wraps(function)
def wrapper(id: int = 0):
try:
regex_bithdate = (
"^(0[1-9]|[12][0-9]|3[01])[\/\-](0[1-9]|1[012])[\/\-]\d{4}$"
)
regex_phone = "^[1-9]{2}(?:[2-8]|9[0-9])[0-9]{3}[0-9]{4}$"
regex_cep = "^[0-9]{5}-[0-9]{3}$"
regex_cpf = "^[0-9]{3}\.?[0-9]{3}\.?[0-9]{3}\.?[0-9]{2}$"
regex_email = "^[\w\.]+@([\w-]+\.)+[\w-]{2,4}$"
regex_password = "^((?=.*[!@#$%^&*()\-_=+{};:,<.>]){1})(?=.*\d)((?=.*[a-z]){1})((?=.*[A-Z]){1}).*$"
request_json: dict = get_data()
if request_json.get(date):
date_now = datetime.now()
pattern = "%d/%m/%Y"
try:
date_passed = datetime.strptime(
request_json[date], pattern)
except ValueError as err:
resp = {
'msg': 'Invalid date format. It must be in the format DD/MM/YYYY'
}
return resp, HTTPStatus.BAD_REQUEST
if date_now >= date_passed:
return {"error": "that date has passed"}, 400
if request_json.get(date_schedule):
pattern = "%d/%m/%Y %H:%M:%S"
tattoo_schedule = request_json.get(date_schedule)
try:
date_now = datetime.utcnow()
start = tattoo_schedule.get("start")
end = tattoo_schedule.get("end")
start = datetime.strptime(
start, pattern)
end = datetime.strptime(end, pattern)
rest_time = end - start
if start.date() != end.date():
return {"error": "the dates are not the same day"}, 400
if(start >= end):
return {"error": "date and hour start smaller date and hour end"}, 400
if rest_time < timedelta(hours=1):
return {"error": "Minimum time of 1 hour per tattoo"}, 400
except ValueError:
return {"error": "datetime in the wrong format. It must be in the format DD/MM/YYYY H:M:S"}, 400
if request_json.get(birthdate):
                    if not match(regex_birthdate, request_json[birthdate]):
return {"error": "birthdate in format incorrect. It must be in the format DD/MM/YYYY"}, 400
if request_json.get(phone):
if not match(regex_phone, request_json[phone]):
return {"error": "phone in format incorrect"}, 400
if request_json.get(cpf):
if not match(regex_cpf, request_json[cpf]):
return {"error": "cpf in format incorrect"}, 400
if request_json.get(zip_code):
if not match(regex_cep, request_json[zip_code]):
return {"error": "cep in format incorrect"}, 400
if request_json.get(email):
if not match(regex_email, request_json[email]):
return {"error": "email in format incorrect"}, 400
if request_json.get(password):
if not match(regex_password, request_json[password]):
return {
"error": "password in format incorrect",
"should be": "Password must contain at least one letter uppercase, one lowercase, one number and one special character",
}, 400
except JSONNotFound as err:
return {"msg": f"{err.describe}"}, err.status_code
if id:
return function(id)
return function()
return wrapper
return received_function
| 45.541353 | 197 | 0.513621 |
794286f6a6888364ae595fe5a13901a45af18a8b | 2,480 | py | Python | n3ml/connection.py | chatterboy/n3ml | 28b4e25a277e55e734e6054e8239237a5ff7d1f1 | [
"MIT"
] | 11 | 2019-03-15T17:20:54.000Z | 2022-03-01T08:25:36.000Z | n3ml/connection.py | chatterboy/n3ml | 28b4e25a277e55e734e6054e8239237a5ff7d1f1 | [
"MIT"
] | 7 | 2019-03-15T16:02:51.000Z | 2021-12-03T08:17:06.000Z | n3ml/connection.py | chatterboy/n3ml | 28b4e25a277e55e734e6054e8239237a5ff7d1f1 | [
"MIT"
] | 9 | 2019-10-14T12:38:19.000Z | 2021-12-02T04:49:28.000Z | from typing import Type
import torch
import torch.nn
import torch.distributions.distribution
import n3ml.population
import n3ml.learning
class Synapse(torch.nn.Module):
def __init__(self,
source: n3ml.population.Population,
target: n3ml.population.Population,
w: torch.Tensor,
w_min: float = 0.0,
w_max: float = 1.0,
alpha: float = None,
learning_rule: Type[n3ml.learning.LearningRule] = None,
initializer: torch.distributions.distribution.Distribution = None) -> None:
super().__init__()
self.source = source
self.target = target
self.register_buffer('w', w)
self.w_min = w_min
self.w_max = w_max
self.alpha = alpha
if learning_rule is None:
self.learning_rule = learning_rule
else:
self.learning_rule = learning_rule(self)
self.initializer = initializer
def init(self) -> None:
self.w[:] = self.initializer.sample(sample_shape=self.w.size())
def normalize(self) -> None:
if self.alpha is not None:
w_abs_sum = self.w.abs().sum(dim=1).unsqueeze(dim=1)
w_abs_sum[w_abs_sum == 0.0] = 1.0
self.w *= self.alpha / w_abs_sum
def update(self) -> None:
if self.learning_rule is not None:
self.learning_rule.run()
def run(self) -> None:
raise NotImplementedError
class LinearSynapse(Synapse):
def __init__(self,
source: n3ml.population.Population,
target: n3ml.population.Population,
w: torch.Tensor = None,
w_min: float = 0.0,
w_max: float = 1.0,
alpha: float = None,
learning_rule: n3ml.learning.LearningRule = None,
initializer: torch.distributions.distribution.Distribution = None) -> None:
if w is None:
w = torch.zeros(size=(target.neurons, source.neurons))
super().__init__(source, target, w, w_min, w_max, alpha, learning_rule, initializer)
def run(self) -> torch.Tensor:
"""
Non batch processing
self.w.size: [self.target.neurons, self.source.neurons]
self.source.s.size: [self.source.neurons]
"""
return torch.matmul(self.w, self.source.s)
class ConvSynapse(Synapse):
pass
| 32.207792 | 92 | 0.57621 |
7942885807c74ba498878ae9ca62678293db9dbf | 981 | py | Python | working_copy_indeed copy.py | jenjhayden/Scrapy | 16bbe7fd30badf242dea95dc8cddfd06b2a911b6 | [
"MIT"
] | null | null | null | working_copy_indeed copy.py | jenjhayden/Scrapy | 16bbe7fd30badf242dea95dc8cddfd06b2a911b6 | [
"MIT"
] | null | null | null | working_copy_indeed copy.py | jenjhayden/Scrapy | 16bbe7fd30badf242dea95dc8cddfd06b2a911b6 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
source = requests.get('https://www.indeed.com/jobs?q=python+developer&l=').text
soup = BeautifulSoup(source,'html5lib')
for jobs in soup.find_all(class_='result'):
try:
title = jobs.h2.text.strip()
except Exception as e:
title = None
print('Job Title:', title)
try:
company = jobs.span.text.strip()
except Exception as e:
        company = None
print('Company:', company)
try:
location = jobs.find('span', class_='location').text.strip()
except Exception as e:
location = None
print('Location:', location)
try:
summary = jobs.find('span', class_='summary').text.strip()
except Exception as e:
summary = None
print('Summary:', summary)
try:
salary = jobs.find('span', class_='no-wrap').text.strip()
except Exception as e:
salary = None
print('salary:', salary)
print('------------------')
| 22.813953 | 79 | 0.598369 |
794288e8dad2043a4160e75b7813c4dfa4e423ac | 12,322 | py | Python | test/functional/feature_fee_estimation.py | beirut-boop/syscoin | fb9a2e1cba0489f4a46e41dbd8ba5265e1351c2b | [
"MIT"
] | null | null | null | test/functional/feature_fee_estimation.py | beirut-boop/syscoin | fb9a2e1cba0489f4a46e41dbd8ba5265e1351c2b | [
"MIT"
] | null | null | null | test/functional/feature_fee_estimation.py | beirut-boop/syscoin | fb9a2e1cba0489f4a46e41dbd8ba5265e1351c2b | [
"MIT"
] | 1 | 2021-12-01T07:18:04.000Z | 2021-12-01T07:18:04.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
connect_nodes,
satoshi_round,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)."""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0)
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split=False):
"""Generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to be small inputs so they wouldn't have
a high coin age when the notion of priority still existed."""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"] / 2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split):
completetx = from_node.signrawtransactionwithwallet(ToHex(tx))["hex"]
else:
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(hexstring=completetx, maxfeerate=0)
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_raw_estimates(node, fees_seen):
"""Call estimaterawfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
for i in range(1, 26):
for _, e in node.estimaterawfee(i).items():
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
def check_smart_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
if feerate - delta > last_feerate:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
% (feerate, last_feerate))
last_feerate = feerate
if i == 0:
assert_equal(e["blocks"], 2)
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
def check_estimates(node, fees_seen):
check_raw_estimates(node, fees_seen)
check_smart_estimates(node, fees_seen)
class EstimateFeeTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
# mine non-standard txs (e.g. txs with "dust" outputs)
# Force fSendTrickle to true (via whitelist.noban)
self.extra_args = [
["-acceptnonstdtxn", "[email protected]"],
["-acceptnonstdtxn", "[email protected]", "-blockmaxweight=68000"],
["-acceptnonstdtxn", "[email protected]", "-blockmaxweight=32000"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
"""
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=self.extra_args)
# Use node0 to mine blocks for input splitting
        # Node1 mines small blocks that are still bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
# (68k weight is room enough for 120 or so transactions)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
self.sync_mempools(wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
self.sync_blocks(wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while reps < 5:
# Double txouts to txouts2
while len(self.txouts) > 0:
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
# Double txouts2 to txouts
while len(self.txouts2) > 0:
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
self.sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
if __name__ == '__main__':
EstimateFeeTest().main()
| 45.806691 | 110 | 0.651355 |
79428966945bf502e08c62405af8bfab9b1d69dc | 2,625 | py | Python | tests/unittests/socket_chat.py | FI18-Trainees/FISocketChat | a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86 | [
"MIT"
] | 4 | 2019-09-19T12:46:52.000Z | 2019-12-02T13:51:13.000Z | tests/unittests/socket_chat.py | FI18-Trainees/FISocketChat | a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86 | [
"MIT"
] | 102 | 2019-09-20T06:56:15.000Z | 2021-12-19T23:33:06.000Z | tests/unittests/socket_chat.py | FI18-Trainees/FISocketChat | a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86 | [
"MIT"
] | null | null | null | import unittest
from test_objs import SocketIOConnection
class TestBasicChat(unittest.TestCase):
def test_start(self):
print("Establishing connection")
sockets = SocketIOConnection()
print("Testing connection")
self.assertTrue(sockets.online_status)
self.assertEqual(sockets.status.get("count", 0), 1)
self.assertFalse(sockets.status.get("loginmode", True))
# ===========================================================================
print("Sending message")
sockets.send_message("test_user", "test_message")
print("Check received messages")
self.assertEqual(len(sockets.messages), 1)
self.assertEqual(len(sockets.errors), 0)
x = sockets.messages[0]
y = {"content": "test_message", "content_type": "message"} # expected
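        # keep only the keys whose values match the expected dict, then compare the overlap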
shared_items = {k: x[k] for k in x if k in y and x[k] == y[k]}
self.assertEqual(shared_items, y)
x = sockets.messages[0].get("author", {})
y = {"username": "test_user", "display_name": "test_user"} # expected
shared_items = {k: x[k] for k in x if k in y and x[k] == y[k]}
self.assertEqual(shared_items, y)
self.assertIn("avatar", x)
self.assertIn("chat_color", x)
# ===========================================================================
print("Sending message with invalid username")
sockets.send_message("", "test_message")
self.assertEqual(len(sockets.messages), 1)
self.assertEqual(len(sockets.errors), 1)
x = sockets.errors[0]
y = {"message": "invalid username"} # expected
shared_items = {k: x[k] for k in x if k in y and x[k] == y[k]}
self.assertEqual(shared_items, y)
# ===========================================================================
print("Sending message with invalid message")
sockets.send_message("test_user", "")
self.assertEqual(len(sockets.messages), 1)
self.assertEqual(len(sockets.errors), 2)
x = sockets.errors[1]
y = {"message": "invalid message"} # expected
shared_items = {k: x[k] for k in x if k in y and x[k] == y[k]}
self.assertEqual(shared_items, y)
# ===========================================================================
print("Sending message with emoji and test replacement")
sockets.send_message("test_user", " Shawn abc")
self.assertEqual(len(sockets.messages), 2)
self.assertEqual(len(sockets.errors), 2)
self.assertIn("img", sockets.messages[1].get("content", ""))
| 44.491525 | 85 | 0.540952 |
794289cb595d8f314d6a3cc7f9e2e0f4767588a5 | 277 | py | Python | python-syntax-exercise/python-syntax/any7.py | ryankrdh/Springboard-Assignments | 9c9b132a814fc818810978dce1f33c4052028353 | [
"MIT"
] | null | null | null | python-syntax-exercise/python-syntax/any7.py | ryankrdh/Springboard-Assignments | 9c9b132a814fc818810978dce1f33c4052028353 | [
"MIT"
] | null | null | null | python-syntax-exercise/python-syntax/any7.py | ryankrdh/Springboard-Assignments | 9c9b132a814fc818810978dce1f33c4052028353 | [
"MIT"
] | null | null | null | def any7(nums):
"""Are any of these numbers a 7? (True/False)"""
# YOUR CODE HERE
for num in nums:
if num == 7:
return True
return False
print("should be true", any7([1, 2, 7, 4, 5]))
print("should be false", any7([1, 2, 4, 5]))
| 19.785714 | 52 | 0.523466 |
794289cbc830838a332413add8a84ec36dfff7cd | 30 | py | Python | python/testData/intentions/convertTripleQuotedStringInParenthesized_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/convertTripleQuotedStringInParenthesized_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/convertTripleQuotedStringInParenthesized_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | t = ("string\n"
"some\n") | 15 | 15 | 0.433333 |
79428a905380d3e3a47f4f0cb29e37a70de0a3de | 300 | py | Python | 10DaysofStatistics/Day0-Weighted-Mean.py | KunyuHe/Hacker-Rank-Practice | b6ffae26fd5b11e7826b7c8aa4f197399ed3c93e | [
"Apache-2.0"
] | null | null | null | 10DaysofStatistics/Day0-Weighted-Mean.py | KunyuHe/Hacker-Rank-Practice | b6ffae26fd5b11e7826b7c8aa4f197399ed3c93e | [
"Apache-2.0"
] | null | null | null | 10DaysofStatistics/Day0-Weighted-Mean.py | KunyuHe/Hacker-Rank-Practice | b6ffae26fd5b11e7826b7c8aa4f197399ed3c93e | [
"Apache-2.0"
] | null | null | null | # Enter your code here. Read input from STDIN. Print output to STDOUT
size = int(input())
nums = list(map(int, input().split()))
weights = list(map(int, input().split()))
weighted_sum = 0
for i in range(size):
weighted_sum += nums[i] * weights[i]
print(round(weighted_sum / sum(weights), 1))
| 23.076923 | 69 | 0.676667 |
79428ab16b59ff7420ebf1d4c4689fa667ac1c18 | 19,692 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_ddos_custom_policies_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_ddos_custom_policies_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_ddos_custom_policies_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations(object):
"""DdosCustomPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.DdosCustomPolicy"]
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
| 48.985075 | 204 | 0.673268 |
79428c007cb5f673c35aad775e047d5ad9cc87ed | 2,139 | py | Python | quickstartup/qs_accounts/admin.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 13 | 2015-06-10T03:29:15.000Z | 2021-10-01T22:06:48.000Z | quickstartup/qs_accounts/admin.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 47 | 2015-06-10T03:26:18.000Z | 2021-09-22T17:35:24.000Z | quickstartup/qs_accounts/admin.py | shahabaz/quickstartup | e351138580d3b332aa309d5d98d562a1ebef5c2c | [
"MIT"
] | 3 | 2015-07-07T23:55:39.000Z | 2020-04-18T10:34:53.000Z | from django import forms
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import gettext_lazy as _
from .models import User
class UserAdminCreationForm(forms.ModelForm):
password1 = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
password2 = forms.CharField(label=_('Password (verify)'), widget=forms.PasswordInput)
class Meta:
model = get_user_model()
fields = ('name', 'email', 'password1', 'password2', 'is_staff', 'is_superuser')
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
return user
class UserAdminChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ("name", "email", "password", "is_staff", "is_superuser")
def clean_password(self):
return self.initial["password"]
class UserAdmin(admin.ModelAdmin):
form = UserAdminChangeForm
add_form = UserAdminCreationForm
list_display = ("name", "email", "is_staff", "last_login")
list_filter = ("is_staff", "is_active")
fieldsets = (
(None, {"fields": ("name", "email", "password")}),
("Permissions", {"fields": ("is_active", "is_staff")}),
("Important dates", {"fields": ("last_login", "date_joined")}),
)
add_fieldsets = (
(None, {
"classes": ("wide",),
"fields": ("name", "email", "password1", "password2", "is_staff"),
},),
)
search_fields = ("name", "email")
ordering = ("name", "email")
# Enable admin interface if User is the quickstart user model
if get_user_model() is User:
admin.site.register(User, UserAdmin)
| 31.925373 | 89 | 0.656381 |
79428c50f6fd8b19b4cdb1c3cf6a38997060de68 | 1,589 | py | Python | plugins/file/select_file.py | manuelprogramming/OSA | 3a57ea944eef3e8680055a35e8cebd36b93dac51 | [
"MIT",
"Unlicense"
] | 1 | 2022-01-06T21:00:01.000Z | 2022-01-06T21:00:01.000Z | plugins/file/select_file.py | manuelprogramming/OSA | 3a57ea944eef3e8680055a35e8cebd36b93dac51 | [
"MIT",
"Unlicense"
] | null | null | null | plugins/file/select_file.py | manuelprogramming/OSA | 3a57ea944eef3e8680055a35e8cebd36b93dac51 | [
"MIT",
"Unlicense"
] | null | null | null | from dataclasses import dataclass
from typing import Tuple
from tkinter.filedialog import askopenfilename
from tkinter import Tk
from osa import factory
from handlers.result import BaseResult
from handlers.file import get_saving_path, set_setting
@dataclass
class SelectFile:
"""
Opens a File Dialog for selecting a file
"""
command: str
result: BaseResult
def do_work(self) -> BaseResult:
file_name = self.showDialog()
self.result.value = file_name
valid_file_types = (".csv", ".xlsx", ".xls", ".dat", ".DAT", ".txt")
if not self.result.value.endswith(valid_file_types):
self._fail_result(valid_file_types)
return self.result
set_setting("selected_file", file_name)
self.result.msg = f"Selected the file {self.result.value}"
return self.result
def showDialog(self):
root = Tk()
file_type_filter = [("All types", ".*"),
("CSV file", ".csv"),
("Excel files", ".xlsx .xls"),
("Data files", ".dat .DAT"),
("Text files", ".txt")]
saving_path = get_saving_path()
filename = askopenfilename(filetypes=file_type_filter, initialdir=saving_path)
root.destroy()
return filename
def _fail_result(self, valid_file_types: Tuple[str, ...]):
self.result.msg = f"The file chosen has not a valid type. Valid Types are {valid_file_types}"
def initialize() -> None:
factory.register("select_file", SelectFile)
| 29.425926 | 101 | 0.614852 |
79428d1b5c59c73e3b9c0c3137f1bdd1cd326fee | 2,433 | py | Python | leetcode/040_combination_sum_II.py | aiden0z/snippets | c3534ad718599a64f3c7ccdbfe51058e01244c60 | [
"MIT"
] | null | null | null | leetcode/040_combination_sum_II.py | aiden0z/snippets | c3534ad718599a64f3c7ccdbfe51058e01244c60 | [
"MIT"
] | null | null | null | leetcode/040_combination_sum_II.py | aiden0z/snippets | c3534ad718599a64f3c7ccdbfe51058e01244c60 | [
"MIT"
] | null | null | null | """Combination Sum II
Given a collection of candidate numbers (candidates) and a target number (target), find all
unique combinations in candidates where the candidate numbers sum to target.
Each number in candidates may only be used once in the combination.
Note:
* All numbers (including target) will be positive integers.
* The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
from typing import List
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
results = []
self.dfs(sorted(candidates), target, [], results, 0)
return results
def dfs(self, candidates: List[int], target: int, answer: List[int], results: List[List[int]],
i: int):
if target < 0:
return
if target == 0:
results.append(answer)
return
for index in range(i, len(candidates)):
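            # skip duplicates at the same recursion depth so each combination is emitted once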
if index > i and candidates[index] == candidates[index - 1]:
continue
self.dfs(candidates, target - candidates[index], answer + [candidates[index]], results,
index + 1)
class SolutionBacktracking:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
results = []
self.find(sorted(candidates), target, [], results, 0)
return results
def find(self, candidates: List[int], target: int, answer: List[int], results: List[List[int]],
start: int):
if target < 0:
return
if target == 0:
results.append(answer[:])
return
for i in range(start, len(candidates)):
if i > start and candidates[i] == candidates[i - 1]:
continue
answer.append(candidates[i])
self.find(candidates, target - candidates[i], answer, results, i + 1)
answer.pop()
if __name__ == '__main__':
cases = [([10, 1, 2, 7, 6, 1, 5], 8), ([2, 5, 2, 1, 2], 5)]
for case in cases:
for S in [Solution, SolutionBacktracking]:
print(S().combinationSum2(case[0], case[1]))
| 25.882979 | 99 | 0.568023 |
79428e70b85cfc59363a7ad6e80599bd46f985b5 | 11,809 | py | Python | utils/image.py | cresposito/BirdCLEF-Baseline | bb95a749d21b62031aa208fe7a12e991eba076ac | [
"MIT"
] | 47 | 2018-04-11T01:15:18.000Z | 2022-01-27T15:30:45.000Z | utils/image.py | cresposito/BirdCLEF-Baseline | bb95a749d21b62031aa208fe7a12e991eba076ac | [
"MIT"
] | null | null | null | utils/image.py | cresposito/BirdCLEF-Baseline | bb95a749d21b62031aa208fe7a12e991eba076ac | [
"MIT"
] | 17 | 2018-04-17T00:26:33.000Z | 2021-12-30T10:02:24.000Z | # This file includes basic functionality for image processing
# including i/o handling, image augmentation and model input pre-processing
# Author: Stefan Kahl, 2018, Chemnitz University of Technology
import sys
sys.path.append("..")
import copy
import numpy as np
import cv2
######################## CONFIG ##########################
import config as cfg
#Fixed random seed
RANDOM = cfg.getRandomState()
def resetRandomState():
global RANDOM
RANDOM = cfg.getRandomState()
########################## I/O ###########################
def openImage(path, im_dim=1):
# Open image
if im_dim == 3:
img = cv2.imread(path, 1)
else:
img = cv2.imread(path, 0)
# Convert to floats between 0 and 1
img = np.asarray(img / 255., dtype='float32')
return img
def showImage(img, name='IMAGE', timeout=-1):
cv2.imshow(name, img)
cv2.waitKey(timeout)
def saveImage(img, path):
cv2.imwrite(path, img)
#################### PRE-PROCESSING ######################
def normalize(img, zero_center=False):
# Normalize
if not img.min() == 0 and not img.max() == 0:
img -= img.min()
img /= img.max()
else:
img = img.clip(0, 1)
# Use values between -1 and 1
if zero_center:
img -= 0.5
img *= 2
return img
def substractMean(img, clip=True):
# Only suitable for RGB images
if len(img.shape) == 3:
# Normalized image?
if img.max() <= 1.0:
img[:, :, 0] -= 0.4850 #B
img[:, :, 1] -= 0.4579 #G
img[:, :, 2] -= 0.4076 #R
else:
img[:, :, 0] -= 123.680 #B
img[:, :, 1] -= 116.779 #G
img[:, :, 2] -= 103.939 #R
else:
img -= np.mean(img)
# Clip values
if clip:
img = img.clip(0, img.max())
return img
def prepare(img):
# ConvNet inputs in Theano are 4D-vectors: (batch size, channels, height, width)
# Add axis if grayscale image
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
# Transpose axis, channels = axis 0
img = np.transpose(img, (2, 0, 1))
# Add new dimension
img = np.expand_dims(img, 0)
return img
######################## RESIZING ########################
def resize(img, width, height, mode='squeeze'):
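    # mode: 'squeeze' (ignore aspect ratio), 'crop'/'cropCenter', 'cropRandom' or 'fill' (pad with noise)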
if img.shape[:2] == (height, width):
return img
if mode == 'crop' or mode == 'cropCenter':
img = cropCenter(img, width, height)
elif mode == 'cropRandom':
img = cropRandom(img, width, height)
elif mode == 'fill':
img = fill(img, width, height)
else:
img = squeeze(img, width, height)
return img
def squeeze(img, width, height):
# Squeeze resize: Resize image and ignore aspect ratio
return cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
def cropRandom(img, width, height):
# Random crop: Scale shortest side to minsize, crop with random offset
# Original image shape
h, w = img.shape[:2]
aspect_ratio = float(max(h, w)) / float(min(h, w))
# Scale original image
minsize = int(max(width, height) * 1.1)
if w <= h and w < minsize:
img = squeeze(img, minsize, int(minsize * aspect_ratio))
elif h < w and h < minsize:
img = squeeze(img, int(minsize * aspect_ratio), minsize)
#crop with random offset
h, w = img.shape[:2]
top = RANDOM.randint(0, h - height)
left = RANDOM.randint(0, w - width)
new_img = img[top:top + height, left:left + width]
return new_img
def cropCenter(img, width, height):
# Center crop: Scale shortest side, crop longer side
# Original image shape
h, w = img.shape[:2]
aspect_ratio = float(max(h, w)) / float(min(h, w))
# Scale original image
if w == h:
img = squeeze(img, max(width, height), max(width, height))
elif width >= height:
if h >= w:
img = squeeze(img, width, int(width * aspect_ratio))
else:
img = squeeze(img, int(height * aspect_ratio), height)
else:
if h >= w:
img = squeeze(img, int(height / aspect_ratio), height)
else:
img = squeeze(img, int(height * aspect_ratio), height)
#Crop from original image
top = (img.shape[0] - height) // 2
left = (img.shape[1] - width) // 2
new_img = img[top:top + height, left:left + width]
return new_img
def fill(img, width, height):
# Fill mode: Scale longest side, pad shorter side with noise
# Determine new shape
try:
new_shape = (height, width, img.shape[2])
except:
new_shape = (height, width)
# Allocate array with noise
new_img = RANDOM.normal(0.0, 1.0, new_shape)
# Original image shape
h, w = img.shape[:2]
aspect_ratio = float(max(h, w)) / float(min(h, w))
# Scale original image
if w == h:
img = squeeze(img, min(width, height), min(width, height))
elif width >= height:
if h >= w:
img = squeeze(img, int(height / aspect_ratio), height)
else:
img = squeeze(img, width, int(width / aspect_ratio))
else:
if h >= w:
img = squeeze(img, width, int(width * aspect_ratio))
else:
img = squeeze(img, width, int(width / aspect_ratio))
# Place original image at center of new image
top = (height - img.shape[0]) // 2
left = (width - img.shape[1]) // 2
new_img[top:top + img.shape[0], left:left + img.shape[1]] = img
return new_img
###################### AUGMENTATION ######################
def augment(img, augmentation={}, count=3, probability=0.5):
# Make working copy
augmentations = copy.deepcopy(augmentation)
# Choose number of augmentations according to count
# Count = 3 means either 0, 1, 2 or 3 different augmentations
while(count > 0 and len(augmentations) > 0):
# Roll the dice if we do augment or not
if RANDOM.choice([True, False], p=[probability, 1 - probability]):
# Choose one method
aug = RANDOM.choice(augmentations.keys())
            # Call augmentation methods
if aug == 'flip':
img = flip(img, augmentations[aug])
elif aug == 'rotate':
img = rotate(img, augmentations[aug])
elif aug == 'zoom':
img = zoom(img, augmentations[aug])
elif aug == 'crop':
if isinstance(augmentations[aug], float):
img = crop(img, top=augmentations[aug], left=augmentations[aug], right=augmentations[aug], bottom=augmentations[aug])
else:
img = crop(img, top=augmentations[aug][0], left=augmentations[aug][1], bottom=augmentations[aug][2], right=augmentations[aug][3])
elif aug == 'roll':
img = roll(img, vertical=augmentations[aug], horizontal=augmentations[aug])
elif aug == 'roll_v':
img = roll(img, vertical=augmentations[aug], horizontal=0)
elif aug == 'roll_h':
img = roll(img, vertical=0, horizontal=augmentations[aug])
elif aug == 'mean':
img = mean(img, augmentations[aug])
elif aug == 'noise':
img = noise(img, augmentations[aug])
elif aug == 'dropout':
img = dropout(img, augmentations[aug])
elif aug == 'blackout':
img = blackout(img, augmentations[aug])
elif aug == 'blur':
img = blur(img, augmentations[aug])
elif aug == 'brightness':
img = brightness(img, augmentations[aug])
elif aug == 'multiply':
img = randomMultiply(img, augmentations[aug])
elif aug == 'hue':
img = hue(img, augmentations[aug])
elif aug == 'lightness':
img = lightness(img, augmentations[aug])
elif aug == 'add':
img = add(img, augmentations[aug])
else:
pass
# Remove key so we avoid duplicate augmentations
del augmentations[aug]
# Count (even if we did not augment)
count -= 1
return img
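# Illustrative usage sketch (not part of the original module): a richer
# augmentation spec than the 'flip'-only call in the __main__ demo below.
# The keys and value types follow the dispatcher in augment() above.
def _example_augment(img):
    return augment(img,
                   {'flip': 1, 'rotate': 10, 'noise': 0.05, 'brightness': 0.15},
                   count=2,
                   probability=0.5)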
def flip(img, flip_axis=1):
return cv2.flip(img, flip_axis)
def rotate(img, angle, zoom=1.0):
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), RANDOM.uniform(-angle, angle), zoom)
    return cv2.warpAffine(img, M, (w, h))
def zoom(img, amount=0.33):
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), 0, 1 + RANDOM.uniform(0, amount))
    return cv2.warpAffine(img, M, (w, h))
def crop(img, top=0.1, left=0.1, bottom=0.1, right=0.1):
h, w = img.shape[:2]
t_crop = max(1, int(h * RANDOM.uniform(0, top)))
l_crop = max(1, int(w * RANDOM.uniform(0, left)))
b_crop = max(1, int(h * RANDOM.uniform(0, bottom)))
r_crop = max(1, int(w * RANDOM.uniform(0, right)))
img = img[t_crop:-b_crop, l_crop:-r_crop]
img = squeeze(img, w, h)
return img
def roll(img, vertical=0.1, horizontal=0.1):
# Vertical Roll
img = np.roll(img, int(img.shape[0] * RANDOM.uniform(-vertical, vertical)), axis=0)
# Horizontal Roll
img = np.roll(img, int(img.shape[1] * RANDOM.uniform(-horizontal, horizontal)), axis=1)
return img
def mean(img, normalize=True):
img = substractMean(img, True)
if normalize and not img.max() == 0:
img /= img.max()
return img
def noise(img, amount=0.05):
img += RANDOM.normal(0.0, RANDOM.uniform(0, amount**0.5), img.shape)
img = np.clip(img, 0.0, 1.0)
return img
def dropout(img, amount=0.25):
d = RANDOM.uniform(0, 1, img.shape)
d[d <= amount] = 0
d[d > 0] = 1
return img * d
def blackout(img, amount=0.25):
b_width = int(img.shape[1] * amount)
b_start = RANDOM.randint(0, img.shape[1] - b_width)
img[:, b_start:b_start + b_width] = 0
return img
def blur(img, kernel_size=3):
return cv2.blur(img, (kernel_size, kernel_size))
def brightness(img, amount=0.25):
img *= RANDOM.uniform(1 - amount, 1 + amount)
img = np.clip(img, 0.0, 1.0)
return img
def randomMultiply(img, amount=0.25):
img *= RANDOM.uniform(1 - amount, 1 + amount, size=img.shape)
img = np.clip(img, 0.0, 1.0)
return img
def hue(img, amount=0.1):
try:
# Only works with BGR images
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        hsv[:, :, 0] = hsv[:, :, 0].clip(0, 360)  # clip() returns a copy; assign it back
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    except Exception:
        pass
return img
def lightness(img, amount=0.25):
try:
# Only works with BGR images
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
lab[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        lab[:, :, 0] = lab[:, :, 0].clip(0, 255)  # clip() returns a copy; assign it back
        img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    except Exception:
        pass
return img
def add(img, items):
# Choose one item from List
index = RANDOM.randint(len(items))
# Open and resize image
img2 = openImage(items[index], cfg.IM_DIM)
img2 = resize(img2, img.shape[1], img.shape[0])
# Generate random weights
w1 = RANDOM.uniform(1, 2)
w2 = RANDOM.uniform(1, 2)
# Add images and calculate average
img = (img * w1 + img2 * w2) / (w1 + w2)
return img
if __name__ == '__main__':
im_path = '../example/Acadian Flycatcher.png'
img = openImage(im_path, 1)
img = resize(img, 256, 256, mode='fill')
showImage(img)
img = augment(img, {'flip':1}, 3)
showImage(img)
| 27.209677 | 149 | 0.556017 |
79428eb260a0f9cba3e4dc17273e462a9e4b039a | 467 | py | Python | trump.py | ant3h/knife_scraper | 1f5bffce2c6bb5dfdde9f49845d916acd1a92a90 | [
"MIT"
] | null | null | null | trump.py | ant3h/knife_scraper | 1f5bffce2c6bb5dfdde9f49845d916acd1a92a90 | [
"MIT"
] | null | null | null | trump.py | ant3h/knife_scraper | 1f5bffce2c6bb5dfdde9f49845d916acd1a92a90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# trump.py - sopel module that replies to 'bigly' and 'disaster' mentions
#
# Copyright (c) 2017 Casey Bartlett <[email protected]>
#
# See LICENSE for terms of usage, modification and redistribution.
#
from sopel import *
import random
from itertools import repeat
@module.rule('.*bigly.*')
def trumpSAD(bot, trigger):
bot.say('SAD.')
@module.rule('.*disaster.*')
def trumpDisaster(bot, trigger):
bot.say('TOTAL DISASTER.')
| 23.35 | 82 | 0.713062 |
79429031691626d1a079f253421e5e3bf0ff3e4f | 952 | py | Python | kubernetes_asyncio/test/test_v1_scale_status.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_scale_status.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_scale_status.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_scale_status import V1ScaleStatus # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1ScaleStatus(unittest.TestCase):
"""V1ScaleStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleStatus(self):
"""Test V1ScaleStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_scale_status.V1ScaleStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.8 | 124 | 0.721639 |
79429055861649ca489737ee388e742ecf57b8f6 | 974 | py | Python | web/cascade/bomstrip.py | epmoyer/cascade | 79b877d5b19567be2d08c00f5cdc31c8968db4c7 | [
"MIT"
] | null | null | null | web/cascade/bomstrip.py | epmoyer/cascade | 79b877d5b19567be2d08c00f5cdc31c8968db4c7 | [
"MIT"
] | null | null | null | web/cascade/bomstrip.py | epmoyer/cascade | 79b877d5b19567be2d08c00f5cdc31c8968db4c7 | [
"MIT"
] | null | null | null | """ Strip the UTF-8 Byte Order Mark (BOM) from a file (if it exists)
"""
import codecs
BOMLEN = len(codecs.BOM_UTF8)
def copy_and_strip_bom(infilename, outfilename):
"""Copy file into a new file, excluding the BOM (if it exists)
"""
buffer_size = 4096
with open(infilename, "r+b") as infile:
with open(outfilename, "wb") as outfile:
chunk = infile.read(buffer_size)
if chunk.startswith(codecs.BOM_UTF8):
chunk = chunk[BOMLEN:]
while chunk:
outfile.write(chunk)
chunk = infile.read(buffer_size)
def open_and_seek_past_bom(infilename):
"""Open file, seek past BOM (if it exists), and return the handle to the open file object
"""
infile = open(infilename, "r+b")
chunk = infile.read(BOMLEN * 2)
if chunk.startswith(codecs.BOM_UTF8):
infile.seek(BOMLEN)
else:
infile.seek(0)
return infile
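def _example_usage(infilename):
    """Illustrative sketch, not part of the original module: write a BOM-free
    copy next to the input, then reopen the original with its BOM skipped."""
    copy_and_strip_bom(infilename, infilename + ".nobom")
    infile = open_and_seek_past_bom(infilename)
    try:
        return infile.read()  # bytes, with any UTF-8 BOM already skipped
    finally:
        infile.close()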
| 30.4375 | 94 | 0.602669 |
7942906c909f7ae6cc73ebb728c2336ac0c6587e | 2,687 | py | Python | mailpile/plugins/keylookup/dnspka.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/keylookup/dnspka.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/keylookup/dnspka.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | try:
import DNS
except:
DNS = None
import urllib2
from mailpile.i18n import gettext
from mailpile.plugins.keylookup import LookupHandler
from mailpile.plugins.keylookup import register_crypto_key_lookup_handler
_ = lambda t: t
#
# Support for DNS PKA (_pka) entries.
# See http://www.gushi.org/make-dns-cert/HOWTO.html
#
class DNSPKALookupHandler(LookupHandler):
NAME = _("DNS PKA records")
TIMEOUT = 10
PRIORITY = 100
def __init__(self, *args, **kwargs):
LookupHandler.__init__(self, *args, **kwargs)
if not DNS:
return
DNS.ParseResolvConf()
self.req = DNS.Request(qtype="TXT")
def _score(self, key):
return (9, _('Found key in DNSPKA'))
def _lookup(self, address):
"""
>>> from mailpile.crypto.dnspka import *
>>> d = DNSPKALookup()
>>> res = d.lookup("[email protected]")
>>> res["result"]["count"] == 1
"""
if not DNS:
return {}
dom = address.replace("@", "._pka.")
result = self.req.req(dom)
for res in result.answers:
if res["typename"] != "TXT":
continue
for entry in res["data"]:
return self._keyinfo(entry)
return {}
def _keyinfo(self, entry):
pkaver = None
fingerprint = None
url = None
for stmt in entry.split(";"):
key, value = stmt.split("=", 1)
if key == "v":
pkaver = value
elif key == "fpr":
fingerprint = value
elif key == "uri":
url = value
if pkaver != "pka1":
raise ValueError("We only know how to deal with pka version 1")
return {fingerprint: {"fingerprint": fingerprint, "url": url, "pkaver": pkaver}}
def _getkey(self, key):
if key["fingerprint"] and not key["url"]:
            res = self._gnupg().recv_key(key["fingerprint"])
            return res
elif key["url"]:
r = urllib2.urlopen(key["url"])
result = r.readlines()
start = 0
end = len(result)
# Hack to deal with possible HTML results from keyservers:
for i in range(len(result)):
if result[i].startswith("-----BEGIN PGP"):
start = i
elif result[i].startswith("-----END PGP"):
end = i
result = "".join(result[start:end])
res = self._gnupg().import_keys(result)
return res
else:
raise ValueError("Need a fingerprint or a URL")
_ = gettext
register_crypto_key_lookup_handler(DNSPKALookupHandler)
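# Illustrative sketch, not part of the original module: _keyinfo() expects the
# DNS PKA TXT record format "v=pka1;fpr=<fingerprint>;uri=<key URL>". The
# fingerprint and URL below are made-up placeholders.
def _example_keyinfo():
    record = "v=pka1;fpr=0123456789ABCDEF0123456789ABCDEF01234567;uri=https://example.com/key.asc"
    handler = DNSPKALookupHandler.__new__(DNSPKALookupHandler)  # _keyinfo() uses no instance state
    return handler._keyinfo(record)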
| 28.284211 | 88 | 0.536658 |
794290a36bfa6db484892c7c5b77536988afd901 | 227 | py | Python | xbee/__init__.py | AndreRenaud/python-xbee | 803b5267395b66540cc0b12f501932c55b168010 | [
"MIT"
] | 65 | 2015-12-06T02:38:28.000Z | 2017-09-05T16:46:07.000Z | xbee/__init__.py | AndreRenaud/python-xbee | 803b5267395b66540cc0b12f501932c55b168010 | [
"MIT"
] | 44 | 2015-10-23T15:33:54.000Z | 2017-09-01T06:39:50.000Z | xbee/__init__.py | AndreRenaud/python-xbee | 803b5267395b66540cc0b12f501932c55b168010 | [
"MIT"
] | 43 | 2015-12-15T02:52:21.000Z | 2017-06-24T17:14:53.000Z | """
XBee package initalization file
[email protected]
"""
__title__ = 'xbee'
__version__ = '2.3.2'
__author__ = 'n.io'
__license__ = 'MIT'
from xbee.thread import XBee
from xbee.thread import ZigBee
from xbee.thread import DigiMesh
| 15.133333 | 32 | 0.735683 |
7942914816b8c578099f42eb3fc1e0d955b918ba | 6,121 | py | Python | hw_07/hypers_optim.py | coinflip112/ml_101 | 9e56ffdb99ac241ed396e25d7f7818a58ee5c4de | [
"MIT"
] | null | null | null | hw_07/hypers_optim.py | coinflip112/ml_101 | 9e56ffdb99ac241ed396e25d7f7818a58ee5c4de | [
"MIT"
] | null | null | null | hw_07/hypers_optim.py | coinflip112/ml_101 | 9e56ffdb99ac241ed396e25d7f7818a58ee5c4de | [
"MIT"
] | null | null | null | import argparse
import json
import lzma
import os
import pickle
import sys
import urllib.request
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
MaxAbsScaler,
MinMaxScaler,
StandardScaler,
PolynomialFeatures,
FunctionTransformer,
)
from target_encoder import TargetEncoder
from search_spaces import (
svc_linear_search_space,
svc_polynomial_search_space,
svc_rbf_search_space,
rf_search_space,
elastic_net_search_space,
gaussian_process_search_space,
knn_search_space,
naive_bayes_search_space,
extra_tree_search_space,
)
from skopt import BayesSearchCV
from skopt.space import Categorical, Integer, Real
class Dataset:
def __init__(
self,
name="binary_classification_competition.train.csv.xz",
url="https://ufal.mff.cuni.cz/~straka/courses/npfl129/1920/datasets/",
):
if not os.path.exists(name):
print("Downloading dataset {}...".format(name), file=sys.stderr)
urllib.request.urlretrieve(url + name, filename=name)
# Load the dataset and split it into `train_target` (column Target)
# and `train_data` (all other columns).
dataset = pd.read_csv(name)
self.data, self.target = dataset.drop("Target", axis=1), dataset["Target"]
if __name__ == "__main__":
train = Dataset()
features = train.data
targets = train.target
categoricals = [
"Workclass",
"Education",
"Marital-status",
"Occupation",
"Relationship",
"Race",
"Native-country",
"Sex",
]
numerics = [
"Age",
"Fnlwgt",
"Education-num",
"Capital-gain",
"Capital-loss",
"Hours-per-week",
]
features_mapping_dict = {"categoricals": categoricals, "numerics": numerics}
numerics_pipeline = lambda: Pipeline(
steps=[
("poly_features", PolynomialFeatures(include_bias=False)),
("scaler", FunctionTransformer(validate=False)),
]
)
preprocessing = lambda features_mapping_dict: ColumnTransformer(
transformers=[
("numerics", numerics_pipeline(), features_mapping_dict["numerics"]),
("categoricals", TargetEncoder(), features_mapping_dict["categoricals"]),
]
)
estimator_svc_linear = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
(
"estimator",
SGDClassifier(loss="hinge", penalty="elasticnet", max_iter=30000),
),
]
)
estimator_extra_trees = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
("estimator", ExtraTreesClassifier()),
]
)
estimator_rf = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
("estimator", RandomForestClassifier()),
]
)
estimator_elastic_net = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
(
"estimator",
SGDClassifier(max_iter=30000, penalty="elasticnet", loss="log"),
),
]
)
estimator_naive_bayes = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
("estimator", GaussianNB()),
]
)
estimator_knn = Pipeline(
steps=[
("preprocessing", preprocessing(features_mapping_dict)),
("estimator", KNeighborsClassifier()),
]
)
naive_bayes_opt = BayesSearchCV(
cv=4,
estimator=estimator_naive_bayes,
search_spaces=naive_bayes_search_space,
n_iter=100,
n_jobs=-1,
refit=False,
verbose=3,
)
knn_opt = BayesSearchCV(
cv=4,
estimator=estimator_knn,
search_spaces=knn_search_space,
n_iter=60,
n_jobs=-1,
refit=False,
verbose=3,
)
svc_linear_opt = BayesSearchCV(
cv=4,
estimator=estimator_svc_linear,
search_spaces=svc_linear_search_space,
n_iter=100,
n_jobs=-1,
refit=False,
verbose=3,
)
extra_tree_opt = BayesSearchCV(
cv=4,
estimator=estimator_extra_trees,
search_spaces=extra_tree_search_space,
n_iter=100,
n_jobs=-1,
refit=False,
verbose=3,
)
rf_opt = BayesSearchCV(
cv=4,
estimator=estimator_rf,
search_spaces=rf_search_space,
n_iter=100,
n_jobs=-1,
refit=False,
verbose=3,
)
elastic_net_opt = BayesSearchCV(
cv=4,
estimator=estimator_elastic_net,
search_spaces=elastic_net_search_space,
n_iter=80,
n_jobs=-1,
refit=False,
verbose=3,
)
naive_bayes_opt.fit(features, targets)
knn_opt.fit(features, targets)
svc_linear_opt.fit(features, targets)
extra_tree_opt.fit(features, targets)
rf_opt.fit(features, targets)
elastic_net_opt.fit(features, targets)
best_params = {
"naive_bayes": [naive_bayes_opt.best_params_, naive_bayes_opt.best_score_],
"knn": [knn_opt.best_params_, knn_opt.best_score_],
"svc_linear": [svc_linear_opt.best_params_, svc_linear_opt.best_score_],
"extra_tree": [extra_tree_opt.best_params_, extra_tree_opt.best_score_],
"rf": [rf_opt.best_params_, rf_opt.best_score_],
"elastic_net": [elastic_net_opt.best_params_, elastic_net_opt.best_score_],
}
with open("best_params.params", "wb") as params_to_write:
pickle.dump(best_params, params_to_write)
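def _load_search_results(path="best_params.params"):
    """Illustrative helper, not part of the original script: reload the pickled
    {model name: [best_params, best_score]} mapping written above for inspection."""
    with open(path, "rb") as params_file:
        return pickle.load(params_file)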
| 28.207373 | 85 | 0.632576 |
7942917e48e03deb411ce9c358ece7f9a5277cd6 | 8,435 | py | Python | backend/config/settings/production.py | kwkelly/chuckwagon | f6d3ee564b9d895195df223025ac05ef56f78e52 | [
"MIT"
] | null | null | null | backend/config/settings/production.py | kwkelly/chuckwagon | f6d3ee564b9d895195df223025ac05ef56f78e52 | [
"MIT"
] | null | null | null | backend/config/settings/production.py | kwkelly/chuckwagon | f6d3ee564b9d895195df223025ac05ef56f78e52 | [
"MIT"
] | null | null | null | import logging
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['chuckwagon.io'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3Boto3Storage'
STATIC_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = 'static'
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = 'media'
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='chuckwagon <[email protected]>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[chuckwagon]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# raven
# ------------------------------------------------------------------------------
# https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat'] # noqa F405
MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware'] + MIDDLEWARE
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env('SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'dsn': SENTRY_DSN
}
# Your stuff...
# ------------------------------------------------------------------------------
| 38.870968 | 106 | 0.597747 |
7942919c8e4b89d5a2cb6de509985098011e3375 | 4,968 | py | Python | bsp/stm32/stm32l433-st-nucleo/rtconfig.py | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | null | null | null | bsp/stm32/stm32l433-st-nucleo/rtconfig.py | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | null | null | null | bsp/stm32/stm32l433-st-nucleo/rtconfig.py | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | null | null | null | import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iccarm'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.3'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'armclang':
# toolchains
CC = 'armclang'
CXX = 'armclang'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = ' --target=arm-arm-none-eabi -mcpu=cortex-m4 '
CFLAGS += ' -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 '
CFLAGS += ' -mfloat-abi=hard -c -fno-rtti -funsigned-char -fshort-enums -fshort-wchar '
CFLAGS += ' -gdwarf-3 -ffunction-sections '
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers '
LFLAGS += ' --list rt-thread.map '
LFLAGS += r' --strict --scatter "board\linker_scripts\link.sct" '
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCLANG/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCLANG/lib'
EXEC_PATH += '/ARM/ARMCLANG/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O1' # armclang recommend
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iccarm':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
| 27.910112 | 152 | 0.574074 |
794291e89ca343cafca6b7589f59ee13d9af00eb | 2,606 | py | Python | test/convert_test_format.py | TysonAndre/igbinary-hhvm | 0608c54e996ee91fc3b16337654ce7ea5ece7814 | [
"PHP-3.01"
] | null | null | null | test/convert_test_format.py | TysonAndre/igbinary-hhvm | 0608c54e996ee91fc3b16337654ce7ea5ece7814 | [
"PHP-3.01"
] | 2 | 2017-10-20T05:57:58.000Z | 2017-10-20T06:10:01.000Z | test/convert_test_format.py | TysonAndre/igbinary-hhvm | 0608c54e996ee91fc3b16337654ce7ea5ece7814 | [
"PHP-3.01"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import sys
TEST_COMMENT = re.compile(r"--TEST--\r?\n([^\n\r]+)\r?\n--", re.MULTILINE)
FILE_CONTENTS = re.compile(r"--FILE--\r?\n<\?php\s*\r?\n?\s*(.*)(\r?\n?(\?>)?)?\r?\n--EXPECT(F|REGEX)?--", re.MULTILINE | re.DOTALL)
EXPECT_CONTENTS = re.compile(r"--(EXPECT(F|REGEX)?)--\r?\n(.*)$", re.MULTILINE | re.DOTALL)
def get_normalized_filename_as_php(phpt_filename):
phpt_filename = re.sub(r"(^|/)igbinary_([^/.]+.phpt)", r"\1\2", phpt_filename)
php_filename = re.sub("\.phpt$", ".php", phpt_filename)
return php_filename
def parse_phpt_sections(contents, phpt_filename):
comment_match = TEST_COMMENT.search(contents)
if comment_match is None:
sys.stderr.write("Could not find comment in {0}\n".format(phpt_filename))
sys.exit(1)
comment = comment_match.group(1)
php_code_match = FILE_CONTENTS.search(contents)
if php_code_match is None:
sys.stderr.write("Could not find php test code in {0}\n".format(phpt_filename))
sys.exit(1)
php_code = php_code_match.group(1)
expect_match = EXPECT_CONTENTS.search(contents)
if expect_match is None:
sys.stderr.write("Could not find expectated output (EXPECT or EXPECTF) in {0}\n".format(phpt_filename))
sys.exit(1)
is_expectf = expect_match.group(1) in ("EXPECTF", "EXPECTREGEX")
expect = expect_match.group(3)
return [comment, php_code, expect, is_expectf]
def main():
files = sys.argv[1:]
if len(files) == 0:
sys.stderr.write("Usage: {0} path/to/igbinary_0xy.phpt...\n".format(sys.argv[0]))
sys.exit(1)
for filename in files:
if filename[-5:] != '.phpt':
sys.stderr.write("{0} is not a file of type phpt\n".format(filename))
sys.exit(1)
for filename in files:
with open(filename) as file:
contents = file.read()
[comment, php_code, expect, is_expectf] = parse_phpt_sections(contents, filename)
result_filename = get_normalized_filename_as_php(filename)
result_contents = "<?php\n// {0}\n{1}".format(comment.strip().replace("\n", "\n// "), php_code)
with open(result_filename, "w") as result_file:
result_file.write(result_contents)
expect_filename = result_filename + (".expectf" if is_expectf else ".expect")
with open(expect_filename, "w") as expect_file:
expect_file.write(expect)
print("Wrote {0}: {1}".format(result_filename, "; ".join(re.split("\r?\n", comment))))
if __name__ == "__main__":
main()
| 42.721311 | 134 | 0.633922 |
794292985726332c2834f5c787f816ae27f5658a | 621 | py | Python | location.py | burkee75/flask-weather-app | 6c7eecfad83727fc73e4122cfcc926ff607870cd | [
"MIT"
] | 1 | 2020-05-01T22:03:19.000Z | 2020-05-01T22:03:19.000Z | location.py | burkee75/flask-weather-app | 6c7eecfad83727fc73e4122cfcc926ff607870cd | [
"MIT"
] | 1 | 2020-05-04T06:54:28.000Z | 2020-05-04T06:54:28.000Z | location.py | burkee75/flask-weather-app | 6c7eecfad83727fc73e4122cfcc926ff607870cd | [
"MIT"
] | 2 | 2020-05-01T22:03:24.000Z | 2020-05-03T18:56:33.000Z | from mapbox import Geocoder
class MapboxLocation:
def __init__(self, api_key):
self.api_key = api_key
self.geocoder = Geocoder(access_token=self.api_key)
def latitude_longitude(self, zipcode):
self.zipcode = zipcode
response = self.geocoder.forward(self.zipcode, country=['us'])
#print(f'Mapbox Zipcode Lookup HTTP code: {response.status_code}')
# Get Zipcode Center Latitude and Longitude from Mapbox
#print(f"Mapbox Coordinates: {response.json()['features'][0]['center']}") # for debugging
return response.json()['features'][0]['center']
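def _example_lookup():
    """Illustrative usage, not part of the original module. The token is a
    placeholder (not a real Mapbox key) and the call needs network access."""
    locator = MapboxLocation(api_key="pk.placeholder-token")
    lon, lat = locator.latitude_longitude("94103")  # Mapbox center is [longitude, latitude]
    return lon, lat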
| 32.684211 | 97 | 0.669887 |
7942935c7b79fbb5a7609e45b8ed9d213fe63b12 | 27,425 | py | Python | appdirs.py | eukreign/appdirs | e0176a71a71b16dc5f16dd32268323e4263dcf1d | [
"MIT"
] | null | null | null | appdirs.py | eukreign/appdirs | e0176a71a71b16dc5f16dd32268323e4263dcf1d | [
"MIT"
] | null | null | null | appdirs.py | eukreign/appdirs | e0176a71a71b16dc5f16dd32268323e4263dcf1d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
import os
import re
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_download_dir():
r"""Return full path to the user-specific download dir for this application.
Typical user data directories are:
Mac OS X: ~/Downloads
Unix: ~/Downloads # or in $XDG_DOWNLOAD_DIR, if defined
Win 7: C:\Users\<username>\Downloads
For Unix, we follow the XDG spec and support $XDG_DOWNLOAD_DIR.
That means, by default "~/Downloads".
"""
if system == "win32":
return os.path.normpath(_get_win_download_folder_with_ctypes())
elif system == 'darwin':
return os.path.expanduser('~/Downloads')
else:
try:
config_dirs = os.path.join(user_config_dir(), 'user-dirs.dirs')
with open(config_dirs) as dirs_file:
path_match = re.search(r'XDG_DOWNLOAD_DIR=(.+)', dirs_file.read())
cleaned_path = path_match.group(1).replace('"', '').replace('$HOME', '~')
return os.path.expanduser(cleaned_path)
except Exception:
pass
return os.getenv('XDG_DOWNLOAD_DIR', os.path.expanduser("~/Downloads"))
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: ~/Library/Preferences/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system == "win32":
path = user_data_dir(appname, appauthor, None, roaming)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Preferences/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == 'win32':
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
elif system == 'darwin':
path = os.path.expanduser('/Library/Preferences')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_download_dir(self):
return user_download_dir()
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_download_folder_with_ctypes():
import ctypes
from ctypes import windll, wintypes
from uuid import UUID
class GUID(ctypes.Structure):
_fields_ = [
("data1", wintypes.DWORD),
("data2", wintypes.WORD),
("data3", wintypes.WORD),
("data4", wintypes.BYTE * 8)
]
def __init__(self, uuidstr):
ctypes.Structure.__init__(self)
uuid = UUID(uuidstr)
self.data1, self.data2, self.data3, \
self.data4[0], self.data4[1], rest = uuid.fields
for i in range(2, 8):
self.data4[i] = rest >> (8-i-1)*8 & 0xff
SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath
SHGetKnownFolderPath.argtypes = [
ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p)
]
FOLDERID_Downloads = '{374DE290-123F-4565-9164-39C4925E467B}'
guid = GUID(FOLDERID_Downloads)
pathptr = ctypes.c_wchar_p()
if SHGetKnownFolderPath(ctypes.byref(guid), 0, 0, ctypes.byref(pathptr)):
raise Exception('Failed to get download directory.')
return pathptr.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- download dir")
print(user_download_dir())
| 39.919942 | 122 | 0.628441 |
7942936918bd00b5774f3070c8ae3a62cf6fe71a | 796 | py | Python | benchmarks/flask_simple/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | benchmarks/flask_simple/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | benchmarks/flask_simple/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | import random
from flask import Flask
from flask import render_template_string
app = Flask(__name__)
@app.route("/")
def index():
rand_numbers = [random.random() for _ in range(20)]
return render_template_string(
"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Hello World!</title>
</head>
<body>
<section class="section">
<div class="container">
<h1 class="title">
Hello World
</h1>
<p class="subtitle">
My first website
</p>
<ul>
{% for i in rand_numbers %}
<li>{{ i }}</li>
{% endfor %}
</ul>
</div>
</section>
</body>
</html>
""",
rand_numbers=rand_numbers,
)
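

# Note (not part of the original benchmark harness): the app can also be served
# locally with Flask's own CLI, e.g. `FLASK_APP=app.py flask run`.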
| 18.511628 | 72 | 0.557789 |
79429389032e033e271c1e5fb364ef614bc89fc3 | 3,397 | py | Python | yandex_maps/tests.py | 66ru/yandex-maps | aadacb751bf4b491a8caf00e35f11faeb4a516d3 | [
"MIT"
] | 8 | 2015-02-22T22:32:30.000Z | 2021-02-16T16:50:44.000Z | yandex_maps/tests.py | 66ru/yandex-maps | aadacb751bf4b491a8caf00e35f11faeb4a516d3 | [
"MIT"
] | 4 | 2015-06-24T10:25:54.000Z | 2019-08-20T11:30:05.000Z | yandex_maps/tests.py | 66ru/yandex-maps | aadacb751bf4b491a8caf00e35f11faeb4a516d3 | [
"MIT"
] | 12 | 2015-01-26T09:40:18.000Z | 2019-09-01T14:10:17.000Z | #coding: utf-8
from unittest import TestCase
from yandex_maps.api import _get_coords, get_map_url
RESPONSE = u"""<?xml version="1.0" encoding="utf-8"?>
<ymaps xmlns="http://maps.yandex.ru/ymaps/1.x" xmlns:x="http://www.yandex.ru/xscript">
<GeoObjectCollection>
<metaDataProperty xmlns="http://www.opengis.net/gml">
<GeocoderResponseMetaData xmlns="http://maps.yandex.ru/geocoder/1.x">
<request>Екатеринбург, Свердлова 27</request>
<found>1</found>
<results>10</results>
</GeocoderResponseMetaData>
</metaDataProperty>
<featureMember xmlns="http://www.opengis.net/gml">
<GeoObject xmlns="http://maps.yandex.ru/ymaps/1.x">
<metaDataProperty xmlns="http://www.opengis.net/gml">
<GeocoderMetaData xmlns="http://maps.yandex.ru/geocoder/1.x">
<kind>house</kind>
<text>Россия, Свердловская область, Екатеринбург, улица Свердлова, 27</text>
<precision>number</precision>
<AddressDetails xmlns="urn:oasis:names:tc:ciq:xsdschema:xAL:2.0">
<Country>
<CountryName>Россия</CountryName>
<AdministrativeArea>
<AdministrativeAreaName>Свердловская область</AdministrativeAreaName>
<Locality>
<LocalityName>Екатеринбург</LocalityName>
<Thoroughfare>
<ThoroughfareName>улица Свердлова</ThoroughfareName>
<Premise>
<PremiseNumber>27</PremiseNumber>
</Premise>
</Thoroughfare>
</Locality>
</AdministrativeArea>
</Country>
</AddressDetails>
</GeocoderMetaData>
</metaDataProperty>
<boundedBy xmlns="http://www.opengis.net/gml">
<Envelope>
<lowerCorner>60.599720 56.852332</lowerCorner>
<upperCorner>60.607931 56.856830</upperCorner>
</Envelope>
</boundedBy>
<Point xmlns="http://www.opengis.net/gml">
<pos>60.603826 56.854581</pos>
</Point>
</GeoObject>
</featureMember>
</GeoObjectCollection>
</ymaps>
""".encode('utf8')
UNKNOWN_ADDRESS = u'''<?xml version="1.0" encoding="utf-8"?>
<ymaps xmlns="http://maps.yandex.ru/ymaps/1.x" xmlns:x="http://www.yandex.ru/xscript">
<GeoObjectCollection>
<metaDataProperty xmlns="http://www.opengis.net/gml">
<GeocoderResponseMetaData xmlns="http://maps.yandex.ru/geocoder/1.x">
<request>Екатеринбург, Свердлова 87876</request>
<found>0</found>
<results>10</results>
</GeocoderResponseMetaData>
</metaDataProperty>
</GeoObjectCollection>
</ymaps>
'''.encode('utf8')
TEST_API_KEY = 'vasia'
COORDS = (u'60.603826', u'56.854581')
MAP_URL = 'http://static-maps.yandex.ru/1.x/?ll=60.6038260,56.8545810&size=200,300&z=5&l=map&pt=60.6038260,56.8545810&key=vasia'
class GeocodeParsingTest(TestCase):
def test_parsing(self):
self.assertEqual(_get_coords(RESPONSE), COORDS)
def test_unknown(self):
self.assertEqual(_get_coords(UNKNOWN_ADDRESS), (None, None,))
# FIXME: the test relies on the order of the parameters in the URL
class MapUrlTest(TestCase):
def test_map_url(self):
url = get_map_url(TEST_API_KEY, COORDS[0], COORDS[1], 5, 200, 300)
self.assertEqual(url, MAP_URL)
| 39.045977 | 128 | 0.622019 |
794293c225d66e35c2075b2bd913d52736e2f3b9 | 551 | py | Python | conftest.py | daltonik666/python_training | 99e243c346aeeeb1698e31be04e1742cce6029d9 | [
"Apache-2.0"
] | null | null | null | conftest.py | daltonik666/python_training | 99e243c346aeeeb1698e31be04e1742cce6029d9 | [
"Apache-2.0"
] | null | null | null | conftest.py | daltonik666/python_training | 99e243c346aeeeb1698e31be04e1742cce6029d9 | [
"Apache-2.0"
] | null | null | null |
import pytest
from fixture.application import Application
fixture = None
@pytest.fixture
def app(request):
global fixture
if fixture is None:
fixture = Application()
else:
if not fixture.is_valid():
fixture = Application()
fixture.session.ensure_login(username="admin", password="secret")
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture | 22.958333 | 69 | 0.678766 |
794293edb49bc42e36dbffc69a3542ae8935d69c | 587 | py | Python | class/cls07.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | class/cls07.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | class/cls07.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | class Date:
def __init__(self, day = 0, month = 0, year = 0):
self.day = day
self.month = month
self.year = year
@classmethod
def date_str(cls, string):
day, month, year = map(int, string.split("-"))
date1 = cls(day, month, year)
return date1
@staticmethod
def date_meta(string):
day, month, year = map(int, string.split("-"))
return day <= 31 and month <= 12 and year <= 2023
date2 = Date.date_str("23-7-2000")
is_date = Date.date_meta("12-11-1990")
print(date2.__dict__)
print(is_date) | 26.681818 | 57 | 0.577513 |
7942946759adb77f7029389e9da300939708dc59 | 15,023 | py | Python | tests/cloudformation/runner/test_runner.py | BenjaDiaz/checkov | c53e32f1654e4ee771abf2001b3cb7df16752f6e | [
"Apache-2.0"
] | null | null | null | tests/cloudformation/runner/test_runner.py | BenjaDiaz/checkov | c53e32f1654e4ee771abf2001b3cb7df16752f6e | [
"Apache-2.0"
] | 3 | 2022-03-07T20:37:31.000Z | 2022-03-21T20:20:14.000Z | tests/cloudformation/runner/test_runner.py | BenjaDiaz/checkov | c53e32f1654e4ee771abf2001b3cb7df16752f6e | [
"Apache-2.0"
] | null | null | null | import dis
import inspect
import os
import unittest
from pathlib import Path
from typing import Dict, Any
import pytest
from checkov.cloudformation import cfn_utils
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
from checkov.cloudformation.parser import parse
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.runner_filter import RunnerFilter
from checkov.cloudformation.runner import Runner
from checkov.common.output.report import Report
from checkov.cloudformation.cfn_utils import create_definitions
class TestRunnerValid(unittest.TestCase):
def test_record_relative_path_with_relative_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
# this is the relative path to the directory to scan (what would actually get passed to the -d arg)
dir_rel_path = os.path.relpath(scan_dir_path).replace('\\', '/')
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=dir_rel_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_abs_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
dir_rel_path = os.path.relpath(scan_dir_path).replace('\\', '/')
dir_abs_path = os.path.abspath(scan_dir_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=dir_abs_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_relative_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "success.json")
# this is the relative path to the file to scan (what would actually get passed to the -f arg)
file_rel_path = os.path.relpath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_rel_path],
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_record_relative_path_with_abs_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "success.json")
file_rel_path = os.path.relpath(scan_file_path)
file_abs_path = os.path.abspath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_abs_path],
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_record_includes_custom_guideline(self):
custom_guideline_url = "https://my.custom.url"
custom_check_id = "MY_CUSTOM_CHECK"
class AnyFailingCheck(BaseResourceCheck):
def __init__(self, *_, **__) -> None:
super().__init__(
"this should fail",
custom_check_id,
[CheckCategories.ENCRYPTION],
["AWS::SQS::Queue"],
guideline=custom_guideline_url
)
def scan_resource_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
return CheckResult.FAILED
AnyFailingCheck()
scan_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources", "fail.yaml")
report = Runner().run(
None,
files=[scan_file_path],
runner_filter=RunnerFilter(framework='cloudformation', checks=[custom_check_id])
)
self.assertEqual(report.failed_checks[0].guideline, custom_guideline_url)
def test_get_tags(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "tags.yaml")
definitions, _ = parse(scan_file_path)
resource_name = 'DataBucket'
resource = definitions['Resources'][resource_name]
entity = {resource_name: resource}
entity_tags = cfn_utils.get_resource_tags(entity)
self.assertEqual(len(entity_tags), 4)
tags = {
'Simple': 'Value',
'Name': '${AWS::AccountId}-data',
'Environment': 'long-form-sub-${account}',
'Account': 'long-form-sub-${account}'
}
for name, value in tags.items():
self.assertEqual(entity_tags[name], value)
resource_name = 'NoTags'
resource = definitions['Resources'][resource_name]
entity = {resource_name: resource}
entity_tags = cfn_utils.get_resource_tags(entity)
self.assertIsNone(entity_tags)
resource_name = 'TerraformServerAutoScalingGroup'
resource = definitions['Resources'][resource_name]
entity = {resource_name: resource}
entity_tags = cfn_utils.get_resource_tags(entity)
self.assertIsNone(entity_tags)
resource_name = 'EKSClusterNodegroup'
resource = definitions['Resources'][resource_name]
entity = {resource_name: resource}
entity_tags = cfn_utils.get_resource_tags(entity)
self.assertEqual(len(entity_tags), 1)
tags = {
'Name': '{\'Ref\': \'ClusterName\'}-EKS-{\'Ref\': \'NodeGroupName\'}'
}
for name, value in tags.items():
self.assertEqual(entity_tags[name], value)
def test_wrong_check_imports(self):
wrong_imports = ["arm", "dockerfile", "helm", "kubernetes", "serverless", "terraform"]
ignore_files = ["BaseCloudsplainingIAMCheck.py"]
check_imports = []
checks_path = Path(inspect.getfile(Runner)).parent.joinpath("checks")
for file in checks_path.rglob("*.py"):
if file.name in ignore_files:
continue
with file.open() as f:
instructions = dis.get_instructions(f.read())
import_names = [instr.argval for instr in instructions if "IMPORT_NAME" == instr.opname]
for import_name in import_names:
wrong_import = next((import_name for x in wrong_imports if x in import_name), None)
if wrong_import:
check_imports.append({file.name: wrong_import})
assert len(check_imports) == 0, f"Wrong imports were added: {check_imports}"
@pytest.mark.skip("No graph checks implemented yet for cloudformation")
def test_run_graph_checks(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "../graph/checks/resources/MSKClusterLogging")
dir_abs_path = os.path.abspath(scan_dir_path)
report = Runner().run(
root_folder=dir_abs_path,
external_checks_dir=None,
runner_filter=RunnerFilter(framework=["cloudformation"], download_external_modules=False)
)
self.assertGreater(len(report.failed_checks), 0)
self.assertGreater(len(report.passed_checks), 0)
def test_external_data(self):
dir_abs_path = os.path.dirname(os.path.realpath(__file__))
definitions = {
f'{dir_abs_path}/s3.yaml': {
'Resources': {
'MySourceQueue': {
'Type': 'AWS::SQS::Queue',
'Properties': {
'KmsMasterKeyId': 'kms_id',
'__startline__': 17,
'__endline__': 22,
'resource_type': 'AWS::SQS::Queue'
}
},
'MyDB': {
'Type': 'AWS::RDS::DBInstance',
'Properties': {
'DBName': 'db',
'DBInstanceClass': 'db.t3.micro',
'Engine': 'mysql',
'MasterUsername': 'master',
'MasterUserPassword': 'password',
'__startline__': 23,
'__endline__': 32,
'resource_type': 'AWS::RDS::DBInstance'
}
}
}
}
}
context = {f'{dir_abs_path}/s3.yaml': {'Parameters': {'KmsMasterKeyId': {'start_line': 5, 'end_line': 9, 'code_lines': [(5, ' "KmsMasterKeyId": {\n'), (6, ' "Description": "Company Name",\n'), (7, ' "Type": "String",\n'), (8, ' "Default": "kms_id"\n'), (9, ' },\n')]}, 'DBName': {'start_line': 10, 'end_line': 14, 'code_lines': [(10, ' "DBName": {\n'), (11, ' "Description": "Name of the Database",\n'), (12, ' "Type": "String",\n'), (13, ' "Default": "db"\n'), (14, ' }\n')]}}, 'Resources': {'MySourceQueue': {'start_line': 17, 'end_line': 22, 'code_lines': [(17, ' "MySourceQueue": {\n'), (18, ' "Type": "AWS::SQS::Queue",\n'), (19, ' "Properties": {\n'), (20, ' "KmsMasterKeyId": { "Ref": "KmsMasterKeyId" }\n'), (21, ' }\n'), (22, ' },\n')], 'skipped_checks': []}, 'MyDB': {'start_line': 23, 'end_line': 32, 'code_lines': [(23, ' "MyDB": {\n'), (24, ' "Type": "AWS::RDS::DBInstance",\n'), (25, ' "Properties": {\n'), (26, ' "DBName": { "Ref": "DBName" },\n'), (27, ' "DBInstanceClass": "db.t3.micro",\n'), (28, ' "Engine": "mysql",\n'), (29, ' "MasterUsername": "master",\n'), (30, ' "MasterUserPassword": "password"\n'), (31, ' }\n'), (32, ' }\n')], 'skipped_checks': []}}, 'Outputs': {'DBAppPublicDNS': {'start_line': 35, 'end_line': 38, 'code_lines': [(35, ' "DBAppPublicDNS": {\n'), (36, ' "Description": "DB App Public DNS Name",\n'), (37, ' "Value": { "Fn::GetAtt" : [ "MyDB", "PublicDnsName" ] }\n'), (38, ' }\n')]}}}}
breadcrumbs = {}
runner = Runner()
runner.set_external_data(definitions, context, breadcrumbs)
report = Report('cloudformation')
runner.check_definitions(root_folder=dir_abs_path, runner_filter=RunnerFilter(framework='cloudformation', download_external_modules=False), report=report)
self.assertEqual(len(report.passed_checks), 2)
self.assertEqual(len(report.failed_checks), 3)
pass
def test_breadcrumbs_report(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "../graph/graph_builder/resources/variable_rendering/render_params")
dir_abs_path = os.path.abspath(scan_dir_path)
runner = Runner()
report = runner.run(root_folder=dir_abs_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='cloudformation', download_external_modules=False, checks=["CKV_AWS_21"]))
self.assertEqual(1, len(report.failed_checks))
self.assertIsNotNone(report.failed_checks[0].breadcrumbs)
self.assertIsNotNone(report.failed_checks[0].breadcrumbs.get("VersioningConfiguration.Status"))
def test_parsing_error_yaml(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "invalid.yaml")
runner = Runner()
report = runner.run(root_folder=None, external_checks_dir=None, files=[scan_file_path],
runner_filter=RunnerFilter(framework='cloudformation'))
self.assertEqual(report.parsing_errors, [scan_file_path])
def test_parsing_error_json(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "invalid.json")
runner = Runner()
report = runner.run(root_folder=None, external_checks_dir=None, files=[scan_file_path],
runner_filter=RunnerFilter(framework='cloudformation'))
self.assertEqual(report.parsing_errors, [scan_file_path])
def test_parse_relevant_files_only(self):
definitions, _ = create_definitions(None, ['main.tf'])
# just check that we skip the file and return normally
self.assertFalse('main.tf' in definitions)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 48.618123 | 1,579 | 0.623111 |
79429587d1c9cec880050745d1c6809ddbb4c38e | 8,433 | py | Python | models/official/detection/inference.py | Joxis/tpu | 6b1a71c3ba8c882cc26e15a54b5f2c302eb34620 | [
"Apache-2.0"
] | null | null | null | models/official/detection/inference.py | Joxis/tpu | 6b1a71c3ba8c882cc26e15a54b5f2c302eb34620 | [
"Apache-2.0"
] | null | null | null | models/official/detection/inference.py | Joxis/tpu | 6b1a71c3ba8c882cc26e15a54b5f2c302eb34620 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""A stand-alone binary to run model inference and visualize results.
It currently only supports model of type `retinanet` and `mask_rcnn`. It only
supports running on CPU/GPU with batch size 1.
"""
# pylint: enable=line-too-long
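# Example invocation (illustrative only -- the paths below are placeholders, not
# files shipped with this repository):
#
#   python inference.py \
#     --model=retinanet \
#     --image_size=640 \
#     --checkpoint_path=/tmp/model.ckpt \
#     --label_map_file=/tmp/label_map.csv \
#     --image_file_pattern='/tmp/images/*.jpg' \
#     --output_html=/tmp/test.html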
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import csv
import io
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import tensorflow.compat.v1 as tf
from configs import factory as config_factory
from dataloader import mode_keys
from modeling import factory as model_factory
from utils import box_utils
from utils import input_utils
from utils import mask_utils
from utils.object_detection import visualization_utils
from hyperparameters import params_dict
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model', 'retinanet', 'Support `retinanet`, `mask_rcnn` and `shapemask`.')
flags.DEFINE_integer('image_size', 640, 'The image size.')
flags.DEFINE_string(
'checkpoint_path', '', 'The path to the checkpoint file.')
flags.DEFINE_string(
'config_file', '', 'The config file template.')
flags.DEFINE_string(
'params_override', '', 'The YAML file/string that specifies the parameters '
'override in addition to the `config_file`.')
flags.DEFINE_string(
'label_map_file', '',
'The label map file. See --label_map_format for the definition.')
flags.DEFINE_string(
'label_map_format', 'csv',
'The format of the label map file. Currently only support `csv` where the '
'format of each row is: `id:name`.')
flags.DEFINE_string(
'image_file_pattern', '',
'The glob that specifies the image file pattern.')
flags.DEFINE_string(
'output_html', '/tmp/test.html',
'The output HTML file that includes images with rendered detections.')
flags.DEFINE_integer(
'max_boxes_to_draw', 10, 'The maximum number of boxes to draw.')
flags.DEFINE_float(
'min_score_threshold', 0.05,
'The minimum score thresholds in order to draw boxes.')
def main(unused_argv):
del unused_argv
# Load the label map.
print(' - Loading the label map...')
label_map_dict = {}
if FLAGS.label_map_format == 'csv':
with tf.gfile.Open(FLAGS.label_map_file, 'r') as csv_file:
reader = csv.reader(csv_file, delimiter=':')
for row in reader:
if len(row) != 2:
raise ValueError('Each row of the csv label map file must be in '
'`id:name` format.')
id_index = int(row[0])
name = row[1]
label_map_dict[id_index] = {
'id': id_index,
'name': name,
}
else:
raise ValueError(
        'Unsupported label map format: {}.'.format(FLAGS.label_map_format))
params = config_factory.config_generator(FLAGS.model)
if FLAGS.config_file:
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.override({
'architecture': {
'use_bfloat16': False, # The inference runs on CPU/GPU.
},
}, is_strict=True)
params.validate()
params.lock()
model = model_factory.model_generator(params)
with tf.Graph().as_default():
image_input = tf.placeholder(shape=(), dtype=tf.string)
image = tf.io.decode_image(image_input, channels=3)
image.set_shape([None, None, 3])
image = input_utils.normalize_image(image)
image_size = [FLAGS.image_size, FLAGS.image_size]
image, image_info = input_utils.resize_and_crop_image(
image,
image_size,
image_size,
aug_scale_min=1.0,
aug_scale_max=1.0)
image.set_shape([image_size[0], image_size[1], 3])
# batching.
images = tf.reshape(image, [1, image_size[0], image_size[1], 3])
images_info = tf.expand_dims(image_info, axis=0)
# model inference
outputs = model.build_outputs(
images, {'image_info': images_info}, mode=mode_keys.PREDICT)
# outputs['detection_boxes'] = (
# outputs['detection_boxes'] / tf.tile(images_info[:, 2:3, :], [1, 1, 2]))
predictions = outputs
# Create a saver in order to load the pre-trained checkpoint.
saver = tf.train.Saver()
image_with_detections_list = []
with tf.Session() as sess:
print(' - Loading the checkpoint...')
saver.restore(sess, FLAGS.checkpoint_path)
image_files = tf.gfile.Glob(FLAGS.image_file_pattern)
for i, image_file in enumerate(image_files):
print(' - Processing image %d...' % i)
with tf.gfile.GFile(image_file, 'rb') as f:
image_bytes = f.read()
image = Image.open(image_file)
image = image.convert('RGB') # needed for images with 4 channels.
width, height = image.size
np_image = (np.array(image.getdata())
.reshape(height, width, 3).astype(np.uint8))
print(np_image.shape)
predictions_np = sess.run(
predictions, feed_dict={image_input: image_bytes})
logits = predictions_np['logits'][0]
print(logits.shape)
labels = np.argmax(logits.squeeze(), -1)
print(labels.shape)
print(labels)
labels = np.array(
Image.fromarray(labels.astype('uint8')))
print(labels.shape)
plt.imshow(labels)
plt.savefig(f"temp-{i}.png")
# num_detections = int(predictions_np['num_detections'][0])
# np_boxes = predictions_np['detection_boxes'][0, :num_detections]
# np_scores = predictions_np['detection_scores'][0, :num_detections]
# np_classes = predictions_np['detection_classes'][0, :num_detections]
# np_classes = np_classes.astype(np.int32)
# np_masks = None
# if 'detection_masks' in predictions_np:
# instance_masks = predictions_np['detection_masks'][0, :num_detections]
# np_masks = mask_utils.paste_instance_masks(
# instance_masks, box_utils.yxyx_to_xywh(np_boxes), height, width)
#
# image_with_detections = (
# visualization_utils.visualize_boxes_and_labels_on_image_array(
# np_image,
# np_boxes,
# np_classes,
# np_scores,
# label_map_dict,
# instance_masks=np_masks,
# use_normalized_coordinates=False,
# max_boxes_to_draw=FLAGS.max_boxes_to_draw,
# min_score_thresh=FLAGS.min_score_threshold))
# image_with_detections_list.append(image_with_detections)
# print(' - Saving the outputs...')
# formatted_image_with_detections_list = [
# Image.fromarray(image.astype(np.uint8))
# for image in image_with_detections_list]
# html_str = '<html>'
# image_strs = []
# for formatted_image in formatted_image_with_detections_list:
# with io.BytesIO() as stream:
# formatted_image.save(stream, format='JPEG')
# data_uri = base64.b64encode(stream.getvalue()).decode('utf-8')
# image_strs.append(
# '<img src="data:image/jpeg;base64,{}", height=800>'
# .format(data_uri))
# images_str = ' '.join(image_strs)
# html_str += images_str
# html_str += '</html>'
# with tf.gfile.GFile(FLAGS.output_html, 'w') as f:
# f.write(html_str)
if __name__ == '__main__':
flags.mark_flag_as_required('model')
flags.mark_flag_as_required('checkpoint_path')
flags.mark_flag_as_required('label_map_file')
flags.mark_flag_as_required('image_file_pattern')
flags.mark_flag_as_required('output_html')
logging.set_verbosity(logging.INFO)
tf.app.run(main)
| 35.582278 | 82 | 0.670106 |
79429652637da9b9db83beddfed720442abd0280 | 4,541 | py | Python | lib/prairie.py | JLSirvent/bws-calibration-analysis | b2f129e31974c16d7498e105a075b43bfece92c9 | [
"MIT"
] | null | null | null | lib/prairie.py | JLSirvent/bws-calibration-analysis | b2f129e31974c16d7498e105a075b43bfece92c9 | [
"MIT"
] | null | null | null | lib/prairie.py | JLSirvent/bws-calibration-analysis | b2f129e31974c16d7498e105a075b43bfece92c9 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
# Copyright (c) <2017> <Lionel Garcia>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------------
#
# Not fully documented
import matplotlib as mpl
from cycler import cycler
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
def use_colors(tones, i=None):
"""
Use specific color tones for plotting. If i is specified, this function returns a specific color from the corresponding color cycle
Args:
tones : 'hot' or 'cold' for hot and cold colors
Returns:
color i of the color cycle
"""
hot = ['#fed976', '#feb24c', '#fd8d3c', '#fc4e2a', '#e31a1c', '#b10026']
cold = ['#a6bddb', '#67a9cf', '#3690c0', '#02818a', '#016c59', '#014636']
# cold = ['#44AE7E', '#388A8D', '#397187', '#3E568E', '#463883', '#461167']
if i is None:
        if tones == 'hot':
            colors = hot
        elif tones == 'cold':
colors = cold
else:
colors = tones
plt.rc('axes', prop_cycle=(cycler('color', colors)))
return colors
else:
        if tones == 'hot':
            colors = hot
        elif tones == 'cold':
colors = cold
else:
colors = tones
return colors[i % len(colors)]
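

# Example (illustrative): use_colors('cold') sets matplotlib's color cycle to the
# cold palette, while use_colors('hot', 2) returns the third color of the hot palette.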
def use():
use_colors('cold')
mpl.rcParams['font.family'] = 'arial'
mpl.rcParams['axes.titlesize'] = 9
mpl.rcParams['axes.titlepad'] = 6
mpl.rcParams['text.antialiased'] = True
mpl.rcParams['text.color'] = '#545454'
mpl.rcParams['axes.labelcolor'] = '#545454'
mpl.rcParams['ytick.color'] = '#545454'
mpl.rcParams['xtick.color'] = '#545454'
mpl.rcParams['axes.titleweight'] = 'demibold'
mpl.rcParams['axes.labelsize'] = 9
mpl.rcParams['xtick.labelsize'] = 8
mpl.rcParams['ytick.labelsize'] = 8
mpl.rcParams['axes.spines.left'] = True
mpl.rcParams['axes.spines.bottom'] = True
mpl.rcParams['axes.spines.right'] = True
mpl.rcParams['axes.spines.top'] = True
mpl.rcParams['lines.antialiased'] = True
mpl.rcParams['lines.linewidth'] = 1
mpl.rcParams['lines.markersize'] = 3
mpl.rcParams['legend.fontsize'] = 8
mpl.rcParams['legend.columnspacing'] = 0.5
mpl.rcParams['axes.grid'] = True
mpl.rcParams['grid.color'] = '#DBDBDB'
mpl.rcParams['grid.alpha'] = 0.2
def style(axe, ticks=True):
"""
Apply Bokeh-like styling to a specific axe
Args:
axe : axe to be styled
"""
use()
if hasattr(axe, 'spines'):
axe.spines['bottom'].set_color('#545454')
axe.spines['left'].set_color('#545454')
axe.spines['top'].set_color('#DBDBDB')
axe.spines['right'].set_color('#DBDBDB')
axe.spines['top'].set_linewidth(1)
axe.spines['right'].set_linewidth(1)
if hasattr(axe, 'yaxis'):
axe.yaxis.labelpad = 3
if hasattr(axe, 'xaxis'):
axe.xaxis.labelpad = 3
if ticks is True:
        if hasattr(axe, 'xaxis'):
x_ticks = axe.xaxis.get_majorticklocs()
axe.xaxis.set_minor_locator(MultipleLocator((x_ticks[1] - x_ticks[0]) / 5))
if hasattr(axe, 'yaxis'):
y_ticks = axe.yaxis.get_majorticklocs()
axe.yaxis.set_minor_locator(MultipleLocator((y_ticks[1] - y_ticks[0]) / 5)) | 35.755906 | 136 | 0.603612 |
7942970e843c3187e12d01d474256865bf1ada95 | 6,445 | py | Python | recuprm1.2.py | chichocoria/recuprm | 310997d8cf4bf8ea66bf9e63745bf9c98d8768fa | [
"Xnet",
"X11"
] | null | null | null | recuprm1.2.py | chichocoria/recuprm | 310997d8cf4bf8ea66bf9e63745bf9c98d8768fa | [
"Xnet",
"X11"
] | null | null | null | recuprm1.2.py | chichocoria/recuprm | 310997d8cf4bf8ea66bf9e63745bf9c98d8768fa | [
"Xnet",
"X11"
] | 2 | 2021-03-29T23:05:22.000Z | 2021-04-07T19:50:17.000Z |
import os.path
import os
import shutil
import glob
import winreg
import sched, time
import logging
# Looks up the Sitel Cliente "Puleo" working directory in the Windows registry.
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, "SOFTWARE\\VB and VBA Program Settings\\Sitel32\\Puleo") as key:
value = winreg.QueryValueEx(key,'Directorio')
## Variable holding the working directory; the query returns a sequence, so only element 0 is used.
dirtrabajo=(value[0])
print (dirtrabajo)
## Variable holding the backup directory
bkp= r'/backup_itcs'
dirbackup = dirtrabajo + bkp
## Variables holding the paths to the _sitel.* files
archivoprm= dirtrabajo + '\\_sitel.prm'
archivoprv= dirtrabajo + '\\_sitel.prv'
archivoemp= dirtrabajo + '\\_sitel.emp'
print (dirbackup)
# Create the logger, writing to the file logs_info_itcs.log.
logging.basicConfig(
format = '%(asctime)-5s %(name)-15s %(levelname)-8s %(message)s',
    level = logging.INFO, # INFO event level
    filename = dirtrabajo + '\\logs_info_itcs.log', # file where the logs are written
    filemode = "a" # "a" ("append"): if the log file already exists, it is opened and new lines are appended.
)
logging.info('Se inicio recuprm')
while True:
if os.path.exists('C:/Program Files (x86)'):
        ## Checks whether the OS is 64-bit
if os.path.exists(dirbackup):
print('La carpeta bkp existe y el sistema operatico es de 64bits.')
            ## Checks whether the prm file (the one whose loss triggers the problem) exists; if it exists, do nothing, otherwise restore the files from the bkp folder
if os.path.exists(archivoprm):
print('El archivo existe.')
else:
files_sitel = glob.iglob(os.path.join(dirbackup, "_site*"))
for file in files_sitel:
if os.path.isfile(file):
shutil.copy2(file, dirtrabajo)
fileslic = glob.iglob(os.path.join(dirbackup, "*.lic"))
for file in fileslic:
if os.path.isfile(file):
shutil.copy2(file, 'C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/')
filesprm = glob.iglob(os.path.join(dirbackup, "*.prm"))
for file in filesprm:
if os.path.isfile(file):
shutil.copy2(file, 'C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/')
logging.info('El archivo prm se borro inesperadamente, se realizo el backup OK.')
else:
            ## If the _sitel prm, prv and emp files exist in the working directory, create the bkp folder
if os.path.exists(archivoprm) and os.path.exists(archivoprv) and os.path.exists(archivoemp):
os.mkdir(dirbackup)
files_sitel = glob.iglob(os.path.join(dirtrabajo, "_site*"))
for file in files_sitel:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
fileslic = glob.iglob(os.path.join(
'C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/', "*.lic"))
for file in fileslic:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
filesprm = glob.iglob(os.path.join(
'C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/', "*.prm"))
for file in filesprm:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
logging.info('Se creo la carpeta bkp con los archivos de registracion.')
else:
        # The operating system is 32-bit
if os.path.exists(dirbackup):
print('La carpeta bkp existe y el sistema operatico es de 32bits.')
            ## Checks whether the prm file (the one whose loss triggers the problem) exists; if it exists, do nothing,
            ## otherwise restore the files from the bkp folder
if os.path.exists(archivoprm):
print('El archivo existe.')
else:
files_sitel = glob.iglob(os.path.join(dirbackup, "_site*"))
for file in files_sitel:
if os.path.isfile(file):
shutil.copy2(file, dirtrabajo)
fileslic = glob.iglob(os.path.join(
dirbackup, "*.lic"))
for file in fileslic:
if os.path.isfile(file):
shutil.copy2(file, 'C:/Program Files/ITC Soluciones/Sitel Cliente/')
filesprm = glob.iglob(os.path.join(
dirbackup, "*.prm"))
for file in filesprm:
if os.path.isfile(file):
shutil.copy2(file, 'C:/Program Files/ITC Soluciones/Sitel Cliente/')
logging.info('El archivo prm se borro inesperadamente, se realizo el backup OK.')
else:
os.mkdir(dirbackup)
files_sitel = glob.iglob(os.path.join(dirtrabajo, "_site*"))
for file in files_sitel:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
fileslic = glob.iglob(os.path.join(
'C:/Program Files/ITC Soluciones/Sitel Cliente/', "*.lic"))
for file in fileslic:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
filesprm = glob.iglob(os.path.join(
'C:/Program Files/ITC Soluciones/Sitel Cliente/', "*.prm"))
for file in filesprm:
if os.path.isfile(file):
shutil.copy2(file, dirbackup)
logging.info('Se creo la carpeta bkp con los archivos de registracion.')
    # Above, the bkp folder search copies every file with a .prm extension, which is why _sitel.prm is deleted from that directory below at the end of each pass.
    # Otherwise, matching every file with a .prm extension would duplicate the log entry "El archivo prm se borro inesperadamente, se realizo el backup OK."
if os.path.exists ('C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/_sitel.prm'):
os.remove('C:/Program Files (x86)/ITC Soluciones/Sitel Cliente/_sitel.prm')
    ## Runs the loop every 60 seconds.
time.sleep(60) | 50.748031 | 172 | 0.586191 |
7942975ddf6e8f589e97a3abfaf9779652403b7b | 15,403 | py | Python | ryu/app/experiments/ECMP/fattree.py | Helloworld1995/Ryu_SDN_Controller | 2680f967debca361adc6ff14ddadcbbcde0c7082 | [
"Apache-2.0"
] | 1 | 2021-03-11T01:47:35.000Z | 2021-03-11T01:47:35.000Z | ryu/app/experiments/ECMP/fattree.py | Helloworld1995/Ryu_SDN_Controller | 2680f967debca361adc6ff14ddadcbbcde0c7082 | [
"Apache-2.0"
] | null | null | null | ryu/app/experiments/ECMP/fattree.py | Helloworld1995/Ryu_SDN_Controller | 2680f967debca361adc6ff14ddadcbbcde0c7082 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, Intf, TCLink
from mininet.topo import Topo
import random
import os
import logging
import argparse
import time
from subprocess import Popen
from multiprocessing import Process
from ryu.app.experiments.readfile import readIpeers
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
parser = argparse.ArgumentParser(description="Parameters importation")
parser.add_argument('--k', dest='k', type=int, default=4, choices=[4, 8], help="Switch fanout number")
parser.add_argument('--duration', dest='duration', type=int, default=60, help="Duration (sec) for each iperf traffic generation")
parser.add_argument('--dir', dest='output_dir', help="Directory to store outputs")
parser.add_argument('--cpu', dest='cpu', type=float, default=1.0, help='Total CPU to allocate to hosts')
args = parser.parse_args()
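# Example invocation (illustrative; the output directory is a placeholder):
#   sudo python fattree.py --k 4 --duration 60 --dir ./results --cpu 1.0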
class Fattree(Topo):
"""
Class of Fattree Topology.
"""
CoreSwitchList = []
AggSwitchList = []
EdgeSwitchList = []
HostList = []
def __init__(self, k, density):
self.pod = k
self.density = density
self.iCoreLayerSwitch = (k/2)**2
self.iAggLayerSwitch = k*k/2
self.iEdgeLayerSwitch = k*k/2
self.iHost = self.iEdgeLayerSwitch * density
# Topo initiation
Topo.__init__(self)
def createNodes(self):
self.createCoreLayerSwitch(self.iCoreLayerSwitch)
self.createAggLayerSwitch(self.iAggLayerSwitch)
self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)
self.createHost(self.iHost)
def _addSwitch(self, number, level, switch_list):
"""
Create switches.
"""
for i in xrange(1, number+1):
PREFIX = str(level) + "00"
if i >= 10:
PREFIX = str(level) + "0"
switch_list.append(self.addSwitch(PREFIX + str(i)))
def createCoreLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 1, self.CoreSwitchList)
def createAggLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 2, self.AggSwitchList)
def createEdgeLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 3, self.EdgeSwitchList)
def createHost(self, NUMBER):
"""
Create hosts.
"""
for i in xrange(1, NUMBER+1):
if i >= 100:
PREFIX = "h"
elif i >= 10:
PREFIX = "h0"
else:
PREFIX = "h00"
self.HostList.append(self.addHost(PREFIX + str(i), cpu=args.cpu/float(NUMBER)))
def createLinks(self, bw_c2a, bw_a2e, bw_e2h,links_loss=None):
"""
Add network links.
"""
# Core to Agg
end = self.pod/2
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.CoreSwitchList[i*end+j],
self.AggSwitchList[x+i],
bw=bw_c2a, max_queue_size=1000) # use_htb=False
# Agg to Edge
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],
bw=bw_a2e, max_queue_size=1000) # use_htb=False
# Edge to Host
for x in xrange(0, self.iEdgeLayerSwitch):
for i in xrange(0, self.density):
self.addLink(
self.EdgeSwitchList[x],
self.HostList[self.density * x + i],
bw=bw_e2h, max_queue_size=1000) # use_htb=False
def set_ovs_protocol_13(self,):
"""
Set the OpenFlow version for switches.
"""
self._set_ovs_protocol_13(self.CoreSwitchList)
self._set_ovs_protocol_13(self.AggSwitchList)
self._set_ovs_protocol_13(self.EdgeSwitchList)
def _set_ovs_protocol_13(self, sw_list):
for sw in sw_list:
cmd = "sudo ovs-vsctl set bridge %s protocols=OpenFlow13" % sw
os.system(cmd)
def set_host_ip(net, topo):
hostlist = []
for k in xrange(len(topo.HostList)):
hostlist.append(net.get(topo.HostList[k]))
i = 1
j = 1
for host in hostlist:
host.setIP("10.%d.0.%d" % (i, j))
j += 1
if j == topo.density+1:
j = 1
i += 1
def create_subnetList(topo, num):
"""
Create the subnet list of the certain Pod.
"""
subnetList = []
remainder = num % (topo.pod/2)
if topo.pod == 4:
if remainder == 0:
subnetList = [num-1, num]
elif remainder == 1:
subnetList = [num, num+1]
else:
pass
elif topo.pod == 8:
if remainder == 0:
subnetList = [num-3, num-2, num-1, num]
elif remainder == 1:
subnetList = [num, num+1, num+2, num+3]
elif remainder == 2:
subnetList = [num-1, num, num+1, num+2]
elif remainder == 3:
subnetList = [num-2, num-1, num, num+1]
else:
pass
else:
pass
return subnetList
def install_proactive(net, topo):
"""
Install proactive flow entries for switches.
"""
# Edge Switch
for sw in topo.EdgeSwitchList:
num = int(sw[-2:])
# Downstream
for i in xrange(1, topo.density+1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,arp, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod/2+i)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,ip, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod/2+i)
os.system(cmd)
# Upstream
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,arp,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,actions=group:1'" % sw
os.system(cmd)
# Aggregate Switch
for sw in topo.AggSwitchList:
num = int(sw[-2:])
subnetList = create_subnetList(topo, num)
# Downstream
k = 1
for i in subnetList:
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod/2+k)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,ip, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod/2+k)
os.system(cmd)
k += 1
# Upstream
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,arp,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,actions=group:1'" % sw
os.system(cmd)
# Core Switch
for sw in topo.CoreSwitchList:
j = 1
k = 1
for i in xrange(1, len(topo.EdgeSwitchList)+1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
k += 1
if k == topo.pod/2 + 1:
j += 1
k = 1
def monitor_devs_ng(fname="./txrate.txt", interval_sec=0.1):
"""
Use bwm-ng tool to collect interface transmit rate statistics.
bwm-ng Mode: rate;
interval time: 1s.
"""
cmd = "sleep 1; bwm-ng -t %s -o csv -u bits -T rate -C ',' > %s" % (interval_sec * 1000, fname)
Popen(cmd, shell=True).wait()
def pingTest(net,flows_peers):
count=0
for src,dst in flows_peers:
count+=1
server=net.get(dst)
client=net.get(src)
# client.cmd('ping %s -c %d > %s/pingTest/ping_%s_%s_%d &'%(server.IP(),60,args.output_dir,src,dst,count))
client.cmd('ping -c %d -i 0.1 -n -q %s>> %s/%s &' % (args.duration,server.IP(), args.output_dir,'successive_packets.txt'))
time.sleep(random.random())
def traffic_generation1(net,flows_peers,ping_peers):
"""
Generate traffics and test the performance of the network.
"""
# 1.Start iperf. (Elephant flows)
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
server.cmd("iperf -s > /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 3000))
pingTest(net,ping_peers)
time.sleep(30)
monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
monitor.start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 2500) ) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
# time.sleep(1)
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
# Wait for the traffic to become stable.
# 3. The experiment is going on.
time.sleep(args.duration + 5)
monitor.terminate()
def traffic_generation(net,flows_peers,monitor1,monitor):
"""
Generate traffics and test the performance of the network.
"""
# 1.Start iperf. (Elephant flows)
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
server.cmd("iperf -s > /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
monitor1.start()
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
Thread(target=iperfC, args=(client, server.IP(), 3000,)).start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 2500) ) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
# time.sleep(1)
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
time.sleep(30)
monitor.start()
# Wait for the traffic to become stable.
# 3. The experiment is going on.
time.sleep(args.duration + 5)
monitor.terminate()
monitor1.terminate()
def iperfC(client,ip,time):
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (ip, time))
def run_experiment(pod, density, ip="192.168.16.137", port=6653, bw_c2a=100, bw_a2e=100, bw_e2h=100):
"""
Firstly, start up Mininet;
secondly, generate traffics and test the performance of the network.
"""
# Create Topo.
iperfPath = '/home/lee/ryu2/ryu/app/experiments/iperf_peers.txt'
pingPath = '/home/lee/ryu2/ryu/app/experiments/ping_test.txt'
# lossPath = '/home/lee/ryu2/ryu/app/experiments/link_loss.txt'
iperf_peers = readIpeers(iperfPath)
ping_peers = readIpeers(pingPath)
# loss = readIpeers(lossPath)
time.sleep(2)
topo = Fattree(pod, density)
topo.createNodes()
	topo.createLinks(bw_c2a=bw_c2a, bw_a2e=bw_a2e, bw_e2h=bw_e2h)
# 1. Start Mininet
CONTROLLER_IP = ip
CONTROLLER_PORT = port
net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)
net.addController(
'controller', controller=RemoteController,
ip=CONTROLLER_IP, port=CONTROLLER_PORT)
net.start()
# Set the OpenFlow version for switches as 1.3.0.
topo.set_ovs_protocol_13()
# Set the IP addresses for hosts.
set_host_ip(net, topo)
# Install proactive flow entries.
install_proactive(net, topo)
# monitor1 = Process(target=pingTest, args=(net, ping_peers))
# monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
# 3. Generate traffics and test the performance of the network.
# traffic_generation(net, iperf_peers, monitor1,monitor)
traffic_generation1(net, iperf_peers, ping_peers)
os.system('killall ping')
os.system('killall iperf')
# CLI(net)
# os.killpg(Controller_Ryu.pid, signal.SIGKILL)
net.stop()
if __name__ == '__main__':
setLogLevel('info')
if os.getuid() != 0:
logging.warning("You are NOT root!")
elif os.getuid() == 0:
# run_experiment(4, 2) or run_experiment(8, 4)
run_experiment(args.k, args.k/2)
| 37.205314 | 156 | 0.595728 |