ext | sha | content |
---|---|---|
py | b4094b672c146da78a93622c3d79b3c8e560aaac | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class EdFiStudentParentAssociation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'parent_reference': 'EdFiParentReference',
'student_reference': 'EdFiStudentReference',
'contact_priority': 'int',
'contact_restrictions': 'str',
'emergency_contact_status': 'bool',
'legal_guardian': 'bool',
'lives_with': 'bool',
'primary_contact_status': 'bool',
'relation_descriptor': 'str',
'etag': 'str'
}
attribute_map = {
'id': 'id',
'parent_reference': 'parentReference',
'student_reference': 'studentReference',
'contact_priority': 'contactPriority',
'contact_restrictions': 'contactRestrictions',
'emergency_contact_status': 'emergencyContactStatus',
'legal_guardian': 'legalGuardian',
'lives_with': 'livesWith',
'primary_contact_status': 'primaryContactStatus',
'relation_descriptor': 'relationDescriptor',
'etag': '_etag'
}
def __init__(self, id=None, parent_reference=None, student_reference=None, contact_priority=None, contact_restrictions=None, emergency_contact_status=None, legal_guardian=None, lives_with=None, primary_contact_status=None, relation_descriptor=None, etag=None, _configuration=None): # noqa: E501
"""EdFiStudentParentAssociation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._id = None
self._parent_reference = None
self._student_reference = None
self._contact_priority = None
self._contact_restrictions = None
self._emergency_contact_status = None
self._legal_guardian = None
self._lives_with = None
self._primary_contact_status = None
self._relation_descriptor = None
self._etag = None
self.discriminator = None
if id is not None:
self.id = id
self.parent_reference = parent_reference
self.student_reference = student_reference
if contact_priority is not None:
self.contact_priority = contact_priority
if contact_restrictions is not None:
self.contact_restrictions = contact_restrictions
if emergency_contact_status is not None:
self.emergency_contact_status = emergency_contact_status
if legal_guardian is not None:
self.legal_guardian = legal_guardian
if lives_with is not None:
self.lives_with = lives_with
if primary_contact_status is not None:
self.primary_contact_status = primary_contact_status
if relation_descriptor is not None:
self.relation_descriptor = relation_descriptor
if etag is not None:
self.etag = etag
@property
def id(self):
"""Gets the id of this EdFiStudentParentAssociation. # noqa: E501
# noqa: E501
:return: The id of this EdFiStudentParentAssociation. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EdFiStudentParentAssociation.
# noqa: E501
:param id: The id of this EdFiStudentParentAssociation. # noqa: E501
:type: str
"""
self._id = id
@property
def parent_reference(self):
"""Gets the parent_reference of this EdFiStudentParentAssociation. # noqa: E501
:return: The parent_reference of this EdFiStudentParentAssociation. # noqa: E501
:rtype: EdFiParentReference
"""
return self._parent_reference
@parent_reference.setter
def parent_reference(self, parent_reference):
"""Sets the parent_reference of this EdFiStudentParentAssociation.
:param parent_reference: The parent_reference of this EdFiStudentParentAssociation. # noqa: E501
:type: EdFiParentReference
"""
if self._configuration.client_side_validation and parent_reference is None:
raise ValueError("Invalid value for `parent_reference`, must not be `None`") # noqa: E501
self._parent_reference = parent_reference
@property
def student_reference(self):
"""Gets the student_reference of this EdFiStudentParentAssociation. # noqa: E501
:return: The student_reference of this EdFiStudentParentAssociation. # noqa: E501
:rtype: EdFiStudentReference
"""
return self._student_reference
@student_reference.setter
def student_reference(self, student_reference):
"""Sets the student_reference of this EdFiStudentParentAssociation.
:param student_reference: The student_reference of this EdFiStudentParentAssociation. # noqa: E501
:type: EdFiStudentReference
"""
if self._configuration.client_side_validation and student_reference is None:
raise ValueError("Invalid value for `student_reference`, must not be `None`") # noqa: E501
self._student_reference = student_reference
@property
def contact_priority(self):
"""Gets the contact_priority of this EdFiStudentParentAssociation. # noqa: E501
The numeric order of the preferred sequence or priority of contact. # noqa: E501
:return: The contact_priority of this EdFiStudentParentAssociation. # noqa: E501
:rtype: int
"""
return self._contact_priority
@contact_priority.setter
def contact_priority(self, contact_priority):
"""Sets the contact_priority of this EdFiStudentParentAssociation.
The numeric order of the preferred sequence or priority of contact. # noqa: E501
:param contact_priority: The contact_priority of this EdFiStudentParentAssociation. # noqa: E501
:type: int
"""
self._contact_priority = contact_priority
@property
def contact_restrictions(self):
"""Gets the contact_restrictions of this EdFiStudentParentAssociation. # noqa: E501
Restrictions for student and/or teacher contact with the individual (e.g., the student may not be picked up by the individual). # noqa: E501
:return: The contact_restrictions of this EdFiStudentParentAssociation. # noqa: E501
:rtype: str
"""
return self._contact_restrictions
@contact_restrictions.setter
def contact_restrictions(self, contact_restrictions):
"""Sets the contact_restrictions of this EdFiStudentParentAssociation.
Restrictions for student and/or teacher contact with the individual (e.g., the student may not be picked up by the individual). # noqa: E501
:param contact_restrictions: The contact_restrictions of this EdFiStudentParentAssociation. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
contact_restrictions is not None and len(contact_restrictions) > 250):
raise ValueError("Invalid value for `contact_restrictions`, length must be less than or equal to `250`") # noqa: E501
self._contact_restrictions = contact_restrictions
@property
def emergency_contact_status(self):
"""Gets the emergency_contact_status of this EdFiStudentParentAssociation. # noqa: E501
Indicator of whether the person is a designated emergency contact for the Student. # noqa: E501
:return: The emergency_contact_status of this EdFiStudentParentAssociation. # noqa: E501
:rtype: bool
"""
return self._emergency_contact_status
@emergency_contact_status.setter
def emergency_contact_status(self, emergency_contact_status):
"""Sets the emergency_contact_status of this EdFiStudentParentAssociation.
Indicator of whether the person is a designated emergency contact for the Student. # noqa: E501
:param emergency_contact_status: The emergency_contact_status of this EdFiStudentParentAssociation. # noqa: E501
:type: bool
"""
self._emergency_contact_status = emergency_contact_status
@property
def legal_guardian(self):
"""Gets the legal_guardian of this EdFiStudentParentAssociation. # noqa: E501
Indicator of whether the person is a legal guardian for the Student. # noqa: E501
:return: The legal_guardian of this EdFiStudentParentAssociation. # noqa: E501
:rtype: bool
"""
return self._legal_guardian
@legal_guardian.setter
def legal_guardian(self, legal_guardian):
"""Sets the legal_guardian of this EdFiStudentParentAssociation.
Indicator of whether the person is a legal guardian for the Student. # noqa: E501
:param legal_guardian: The legal_guardian of this EdFiStudentParentAssociation. # noqa: E501
:type: bool
"""
self._legal_guardian = legal_guardian
@property
def lives_with(self):
"""Gets the lives_with of this EdFiStudentParentAssociation. # noqa: E501
Indicator of whether the Student lives with the associated parent. # noqa: E501
:return: The lives_with of this EdFiStudentParentAssociation. # noqa: E501
:rtype: bool
"""
return self._lives_with
@lives_with.setter
def lives_with(self, lives_with):
"""Sets the lives_with of this EdFiStudentParentAssociation.
Indicator of whether the Student lives with the associated parent. # noqa: E501
:param lives_with: The lives_with of this EdFiStudentParentAssociation. # noqa: E501
:type: bool
"""
self._lives_with = lives_with
@property
def primary_contact_status(self):
"""Gets the primary_contact_status of this EdFiStudentParentAssociation. # noqa: E501
Indicator of whether the person is a primary parental contact for the Student. # noqa: E501
:return: The primary_contact_status of this EdFiStudentParentAssociation. # noqa: E501
:rtype: bool
"""
return self._primary_contact_status
@primary_contact_status.setter
def primary_contact_status(self, primary_contact_status):
"""Sets the primary_contact_status of this EdFiStudentParentAssociation.
Indicator of whether the person is a primary parental contact for the Student. # noqa: E501
:param primary_contact_status: The primary_contact_status of this EdFiStudentParentAssociation. # noqa: E501
:type: bool
"""
self._primary_contact_status = primary_contact_status
@property
def relation_descriptor(self):
"""Gets the relation_descriptor of this EdFiStudentParentAssociation. # noqa: E501
The nature of an individual's relationship to a student, primarily used to capture family relationships. # noqa: E501
:return: The relation_descriptor of this EdFiStudentParentAssociation. # noqa: E501
:rtype: str
"""
return self._relation_descriptor
@relation_descriptor.setter
def relation_descriptor(self, relation_descriptor):
"""Sets the relation_descriptor of this EdFiStudentParentAssociation.
The nature of an individual's relationship to a student, primarily used to capture family relationships. # noqa: E501
:param relation_descriptor: The relation_descriptor of this EdFiStudentParentAssociation. # noqa: E501
:type: str
"""
if (self._configuration.client_side_validation and
relation_descriptor is not None and len(relation_descriptor) > 306):
raise ValueError("Invalid value for `relation_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._relation_descriptor = relation_descriptor
@property
def etag(self):
"""Gets the etag of this EdFiStudentParentAssociation. # noqa: E501
A unique system-generated value that identifies the version of the resource. # noqa: E501
:return: The etag of this EdFiStudentParentAssociation. # noqa: E501
:rtype: str
"""
return self._etag
@etag.setter
def etag(self, etag):
"""Sets the etag of this EdFiStudentParentAssociation.
A unique system-generated value that identifies the version of the resource. # noqa: E501
:param etag: The etag of this EdFiStudentParentAssociation. # noqa: E501
:type: str
"""
self._etag = etag
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EdFiStudentParentAssociation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EdFiStudentParentAssociation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EdFiStudentParentAssociation):
return True
return self.to_dict() != other.to_dict()
|
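A minimal usage sketch for the generated model above, intended to run alongside the class definition. It relies only on what the generated code itself shows (the `Configuration.client_side_validation` flag consulted by the setters); the descriptor URI is illustrative, and in real use the required parent and student reference objects would be supplied instead of being skipped.

```python
# Hedged sketch: build the generated model and serialize it. Client-side
# validation is disabled so the required reference objects can be omitted
# purely for illustration; the descriptor URI below is a made-up example.
from swagger_client.configuration import Configuration

cfg = Configuration()
cfg.client_side_validation = False  # attribute consulted by the setters above

assoc = EdFiStudentParentAssociation(
    contact_priority=1,
    emergency_contact_status=True,
    lives_with=True,
    relation_descriptor="uri://ed-fi.org/RelationDescriptor#Mother",  # illustrative
    _configuration=cfg,
)
print(assoc.to_dict())  # snake_case keys, None for the unset references
```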
py | b4094c97ce69c57948e2b37bb95d6d872378adf7 | from __future__ import absolute_import, division, print_function
import salty
from rdkit.Chem import AllChem as Chem
import unittest
import datetime
class check_data_tests(unittest.TestCase):
data_files = ["cationInfo.csv", "anionInfo.csv"]
def test_1_check_data(self):
for i in range(len(self.data_files)):
df = salty.load_data(self.data_files[i])
self.check_data(df)
def test_benchmark(self):
salty.Benchmark.run(self.test_1_check_data)
def check_data(self, df):
startTime = datetime.datetime.now()
def fnDisplay(message):
display(message, startTime)
smiles = df.smiles
for i in range(len(smiles)):
ion = smiles[i]
try:
Chem.SanitizeMol(Chem.MolFromSmiles(ion))
except ValueError:
name = salty.checkName(ion)
message = "RDKit cannot interpret %s ion SMILES in datafile" \
% name
fnDisplay(message)
if "-" not in ion and "+" not in ion:
name = salty.checkName(ion)
message = "%s ion does not have a charge" % name
fnDisplay(message)
if "." in ion:
name = salty.checkName(ion)
message = "%s ion contains more than one molecular entity" \
% name
fnDisplay(message)
def display(message, startTime):
timeDiff = datetime.datetime.now() - startTime
print("{}\t{}".format(timeDiff, message))
if __name__ == '__main__':
unittest.main()
|
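The heart of `check_data` above is an RDKit parse-and-sanitize pass plus two string heuristics. A standalone sketch of that pattern, independent of the salty data files; the example SMILES strings are illustrative.

```python
# Hedged sketch of the SMILES checks used above, without the salty data files.
from rdkit.Chem import AllChem as Chem

for ion in ["CCO", "[Na+]", "not-a-smiles"]:      # illustrative inputs
    mol = Chem.MolFromSmiles(ion)
    if mol is None:                               # RDKit returns None on parse failure
        print("RDKit cannot interpret %s" % ion)
        continue
    Chem.SanitizeMol(mol)                         # raises if valences etc. are broken
    if "-" not in ion and "+" not in ion:
        print("%s does not have a charge" % ion)
    if "." in ion:
        print("%s contains more than one molecular entity" % ion)
```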
py | b4094d123ed5fc7df631f07bd5dc12bd2e01d237 | import os
import sys
import pygame
import random
from pygame import *
from src.utils.images import load_image, load_sprite_sheet
from src.utils.numeric import extractDigits
from src.utils.sounds import *
class Cactus(pygame.sprite.Sprite):
def __init__(self, screen, speed=5, sizex=-1, sizey=-1, scr_size=(600,150)):
pygame.sprite.Sprite.__init__(self,self.containers)
self.screen = screen
self.scr_width = scr_size[0]
self.scr_height = scr_size[1]
self.images,self.rect = load_sprite_sheet('cacti-small.png',3,1,sizex,sizey,-1)
self.rect.bottom = int(0.98*self.scr_height)
self.rect.left = self.scr_width + self.rect.width
self.image = self.images[random.randrange(0,3)]
self.movement = [-1*speed,0]
def draw(self):
self.screen.blit(self.image,self.rect)
def update(self):
self.rect = self.rect.move(self.movement)
if self.rect.right < 0:
self.kill()
|
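`Cactus` expects a class-level `containers` attribute to exist before instantiation, since it is handed to `pygame.sprite.Sprite.__init__`. A hedged sketch of that wiring, assuming the `cacti-small.png` sheet is available to `load_sprite_sheet` and using the default 600x150 screen size.

```python
# Hedged sketch: wire the sprite into a group and run a short fixed-length loop.
# Asset paths and the 600x150 surface are assumptions taken from the defaults above.
import pygame

pygame.init()
screen = pygame.display.set_mode((600, 150))
clock = pygame.time.Clock()

cacti = pygame.sprite.Group()
Cactus.containers = cacti          # Sprite.__init__ above relies on this attribute

Cactus(screen, speed=5)            # auto-added to the group via self.containers
for _ in range(120):               # ~2 seconds at 60 FPS instead of a real game loop
    cacti.update()                 # moves the cactus left; kills it once off-screen
    screen.fill((235, 235, 235))
    for cactus in cacti:
        cactus.draw()
    pygame.display.update()
    clock.tick(60)
pygame.quit()
```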
py | b4094d2320d2345661b089c05d74d778d4199427 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction time during old block rescanning
"""
import time
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import FinalcoinTestFramework
from test_framework.util import (
assert_equal
)
class TransactionTimeRescanTest(FinalcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info('Prepare nodes and wallet')
minernode = self.nodes[0] # node used to mine BTC and create transactions
usernode = self.nodes[1] # user node with correct time
restorenode = self.nodes[2] # node used to restore user wallet and check time determination in ComputeSmartTime (wallet.cpp)
# time constant
cur_time = int(time.time())
ten_days = 10 * 24 * 60 * 60
# synchronize nodes and time
self.sync_all()
minernode.setmocktime(cur_time)
usernode.setmocktime(cur_time)
restorenode.setmocktime(cur_time)
# prepare miner wallet
minernode.createwallet(wallet_name='default')
miner_wallet = minernode.get_wallet_rpc('default')
m1 = miner_wallet.getnewaddress()
# prepare the user wallet with 3 watch only addresses
wo1 = usernode.getnewaddress()
wo2 = usernode.getnewaddress()
wo3 = usernode.getnewaddress()
usernode.createwallet(wallet_name='wo', disable_private_keys=True)
wo_wallet = usernode.get_wallet_rpc('wo')
wo_wallet.importaddress(wo1)
wo_wallet.importaddress(wo2)
wo_wallet.importaddress(wo3)
self.log.info('Start transactions')
# check blockcount
assert_equal(minernode.getblockcount(), 200)
# generate some btc to create transactions and check blockcount
initial_mine = COINBASE_MATURITY + 1
minernode.generatetoaddress(initial_mine, m1)
assert_equal(minernode.getblockcount(), initial_mine + 200)
# synchronize nodes and time
self.sync_all()
minernode.setmocktime(cur_time + ten_days)
usernode.setmocktime(cur_time + ten_days)
restorenode.setmocktime(cur_time + ten_days)
# send 10 btc to user's first watch-only address
self.log.info('Send 10 btc to user')
miner_wallet.sendtoaddress(wo1, 10)
# generate blocks and check blockcount
minernode.generatetoaddress(COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 300)
# synchronize nodes and time
self.sync_all()
minernode.setmocktime(cur_time + ten_days + ten_days)
usernode.setmocktime(cur_time + ten_days + ten_days)
restorenode.setmocktime(cur_time + ten_days + ten_days)
# send 5 btc to our second watch-only address
self.log.info('Send 5 btc to user')
miner_wallet.sendtoaddress(wo2, 5)
# generate blocks and check blockcount
minernode.generatetoaddress(COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 400)
# synchronize nodes and time
self.sync_all()
minernode.setmocktime(cur_time + ten_days + ten_days + ten_days)
usernode.setmocktime(cur_time + ten_days + ten_days + ten_days)
restorenode.setmocktime(cur_time + ten_days + ten_days + ten_days)
# send 1 btc to our third watch-only address
self.log.info('Send 1 btc to user')
miner_wallet.sendtoaddress(wo3, 1)
# generate more blocks and check blockcount
minernode.generatetoaddress(COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 500)
self.log.info('Check user\'s final balance and transaction count')
assert_equal(wo_wallet.getbalance(), 16)
assert_equal(len(wo_wallet.listtransactions()), 3)
self.log.info('Check transaction times')
for tx in wo_wallet.listtransactions():
if tx['address'] == wo1:
assert_equal(tx['blocktime'], cur_time + ten_days)
assert_equal(tx['time'], cur_time + ten_days)
elif tx['address'] == wo2:
assert_equal(tx['blocktime'], cur_time + ten_days + ten_days)
assert_equal(tx['time'], cur_time + ten_days + ten_days)
elif tx['address'] == wo3:
assert_equal(tx['blocktime'], cur_time + ten_days + ten_days + ten_days)
assert_equal(tx['time'], cur_time + ten_days + ten_days + ten_days)
# restore user wallet without rescan
self.log.info('Restore user wallet on another node without rescan')
restorenode.createwallet(wallet_name='wo', disable_private_keys=True)
restorewo_wallet = restorenode.get_wallet_rpc('wo')
restorewo_wallet.importaddress(wo1, rescan=False)
restorewo_wallet.importaddress(wo2, rescan=False)
restorewo_wallet.importaddress(wo3, rescan=False)
# check user has 0 balance and no transactions
assert_equal(restorewo_wallet.getbalance(), 0)
assert_equal(len(restorewo_wallet.listtransactions()), 0)
# proceed to rescan, first with an incomplete one, then with a full rescan
self.log.info('Rescan last history part')
restorewo_wallet.rescanblockchain(initial_mine + 350)
self.log.info('Rescan all history')
restorewo_wallet.rescanblockchain()
self.log.info('Check user\'s final balance and transaction count after restoration')
assert_equal(restorewo_wallet.getbalance(), 16)
assert_equal(len(restorewo_wallet.listtransactions()), 3)
self.log.info('Check transaction times after restoration')
for tx in restorewo_wallet.listtransactions():
if tx['address'] == wo1:
assert_equal(tx['blocktime'], cur_time + ten_days)
assert_equal(tx['time'], cur_time + ten_days)
elif tx['address'] == wo2:
assert_equal(tx['blocktime'], cur_time + ten_days + ten_days)
assert_equal(tx['time'], cur_time + ten_days + ten_days)
elif tx['address'] == wo3:
assert_equal(tx['blocktime'], cur_time + ten_days + ten_days + ten_days)
assert_equal(tx['time'], cur_time + ten_days + ten_days + ten_days)
if __name__ == '__main__':
TransactionTimeRescanTest().main()
|
py | b4094d278e35b34e26fef6efef46bfad303005b9 | #!/usr/bin/env python
"""
Executes utility scripts to ensure clean data for input.
This is done iteratively. Not automated.
"""
from util import data_processing
def main():
"""Converting JPGs to PNGs as input for stylegan2 on google colab."""
# data_processing.file_to_png(source_directory='data/train/',
# target_directory='data/cleaned_train/')
"""Padding images to square shape and resize if appropriate."""
# data_processing.padd_resize_for_input(
# image_address='data/cleaned_train/',
# dimension=1024, # for stylegan2, the dimensions should be (128,256,512,1024)
# target_address='data/cleaned_train_resized/') # cleaned_train_resized
"""Checking number of channels."""
# arr = data_processing.check_channels(file_path_address='data/cleaned_train_resized',
# image_type='png')
# print(arr) # if empty, go ahead and upload data to google cloud. Else address images.
"""Use the results of the model training ouput for each 20th kimg -> GIF."""
# data_processing.jpg_to_gif(file_path_input='data/Abstract_Progression_Images/',
# file_path_output='data/Abstract_GIF/',
# genre='abstract',
# extend_frames=True,
# duration=0.5)
if __name__ == '__main__':
main()
|
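The `util.data_processing` helpers called above are not shown here. As an illustration of the first step only, a minimal Pillow-based JPG-to-PNG conversion; the function name and directory paths are assumptions, not the project's actual API.

```python
# Hedged sketch of a JPG -> PNG conversion step using Pillow; this is not the
# project's data_processing.file_to_png, just an illustration of the idea.
import os
from PIL import Image

def jpgs_to_pngs(source_directory, target_directory):
    os.makedirs(target_directory, exist_ok=True)
    for name in os.listdir(source_directory):
        if not name.lower().endswith((".jpg", ".jpeg")):
            continue
        image = Image.open(os.path.join(source_directory, name)).convert("RGB")
        out_name = os.path.splitext(name)[0] + ".png"
        image.save(os.path.join(target_directory, out_name), "PNG")

# jpgs_to_pngs("data/train/", "data/cleaned_train/")   # paths from the comments above
```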
py | b4094daeb27fb5ec6eef333bfe5f789fc12c437d | from flask import Flask
import json
import boto3
from boto3.dynamodb.conditions import Key
from decimal import Decimal
import pandas as pd
from flask_cors import CORS, cross_origin
dynamodb = boto3.resource('dynamodb', region_name='ap-south-1')
table = dynamodb.Table('StudentMonitoring')
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
def hello():
return "Hello World from Flask"
@app.route('/user/<int:user_id>')
@cross_origin()
def get_user_data(user_id):
response = table.query(
KeyConditionExpression=Key('id').eq(str(user_id))
)
df = pd.DataFrame(response['Items'])
df[['blink_count', 'id', 'lost_focus_count', 'yawn_count']] = df[
['blink_count', 'id', 'lost_focus_count', 'yawn_count']].astype(int)
df[['ear', 'face_not_present_duration', 'lost_focus_duration', 'pitch', 'roll', 'yaw', 'mar', 'timestamp']] = df[
['ear', 'face_not_present_duration', 'lost_focus_duration', 'pitch', 'roll', 'yaw', 'mar', 'timestamp']].astype(float)
df["datetime"] = pd.to_datetime(df['timestamp'], unit='s')
df2 = df.set_index('timestamp').sort_index(ascending=True)
df2 = df2.sort_values(by='datetime')
df2['hour'] = df2['datetime'].apply(lambda x: x.hour)
df2['minute'] = df2['datetime'].apply(lambda x: x.minute)
df2['second'] = df2['datetime'].apply(lambda x: x.second)
t1 = df2[['minute', 'hour', 'blink_count']].groupby(['hour', 'minute']).max().astype(int) - df2[['hour', 'minute', 'blink_count']].groupby(['hour', 'minute']).min().astype(int)
t2 = df2[['minute', 'hour', 'yawn_count']].groupby(['hour', 'minute']).max().astype(int) - df2[['hour', 'minute', 'yawn_count']].groupby(['hour', 'minute']).min().astype(int)
t3 = df2[['minute', 'hour', 'lost_focus_count']].groupby(['hour', 'minute']).max() - df2[['hour', 'minute', 'lost_focus_count']].groupby(['hour', 'minute']).min()
t4 = (df2[['minute', 'hour', 'lost_focus_duration']].groupby(['hour', 'minute']).max() - df2[['hour', 'minute', 'lost_focus_duration']].groupby(['hour', 'minute']).min()).apply(round).astype(int)
t5 = (df2[['minute', 'hour', 'face_not_present_duration']].groupby(['hour', 'minute']).max() - df2[['hour', 'minute', 'face_not_present_duration']].groupby(['hour', 'minute']).min()).apply(round).astype(int)
out = t1.join(t2).join(t3).join(t4).join(t5)
new_index = out.index.map(lambda x: str(pd.Timestamp("%s:%s" % (x[0], x[1]), unit='minute').time()))
out.index = new_index
blink_count_final = [{"date": i, "value": int(row["blink_count"])} for i, row in out.iterrows()]
yawn_count_final = [{"date": i, "value": int(row["yawn_count"])} for i, row in out.iterrows()]
lost_focus_count_final = [{"date": i, "value": int(row["lost_focus_count"])} for i, row in out.iterrows()]
lost_focus_duration_final = [{"date": i, "value": int(row["lost_focus_duration"])} for i, row in out.iterrows()]
face_not_present_duration_final = [{"date": i, "value": int(row["face_not_present_duration"])} for i, row in out.iterrows()]
output = {
'blink_count': blink_count_final,
'yawn_count_final': yawn_count_final,
'lost_focus_count_final': lost_focus_count_final,
'lost_focus_duration_final': lost_focus_duration_final,
'face_not_present_duration_final': face_not_present_duration_final
}
ypr = df2[['yaw', 'pitch', 'roll', 'datetime']]
ypr.loc[:, 'datetime'] = ypr['datetime'].map(lambda x: str(x.time()))
ypr_final = [{'date': row['datetime'], 'type': 'yaw', 'value': row['yaw']} for i, row in
ypr[['yaw', 'datetime']].iterrows()]
ypr_final.extend({'date': row['datetime'], 'type': 'pitch', 'value': row['pitch']} for i, row in
ypr[['pitch', 'datetime']].iterrows())
ypr_final.extend({'date': row['datetime'], 'type': 'roll', 'value': row['roll']} for i, row in
ypr[['roll', 'datetime']].iterrows())
ypr_data = {"ypr": ypr_final}
output.update(ypr_data)
focus_ratio = (df2['lost_focus_duration'].diff(1) == 0).sum() / len(df2)
lost_focus_ratio = 1 - focus_ratio
face_present_ratio = (df2['face_not_present_duration'].diff(1) == 0).sum() / len(df2)
face_absent_ratio = 1 - face_present_ratio
output.update(
{'focus_ratio': focus_ratio,
'lost_focus_ratio': lost_focus_ratio,
'face_present_ratio': face_present_ratio,
'face_absent_ratio': face_absent_ratio}
)
return json.dumps(output, default=default)
def default(obj):
if isinstance(obj, Decimal):
return str(obj)
raise TypeError("Object of type '%s' is not JSON serializable" % type(obj).__name__)
if __name__ == '__main__':
app.run(host='127.0.0.1', debug=False, port=96)
|
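A hedged client-side sketch for the endpoint above, using the host and port passed to `app.run`; the user id is illustrative and assumes a matching partition key exists in the `StudentMonitoring` table.

```python
# Hedged sketch: query the running service and inspect the aggregated metrics.
import requests

resp = requests.get("http://127.0.0.1:96/user/1", timeout=10)   # id 1 is illustrative
resp.raise_for_status()
metrics = resp.json()
print(sorted(metrics))                                # blink_count, ypr, focus_ratio, ...
print(metrics["focus_ratio"], metrics["face_present_ratio"])
```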
py | b4094f0b779f9b6d5760ba676dc6090d0a83417b | from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
def load_index_page(request):
"""
Renders the main page and sends the required information to the globe page.
"""
context = {}
return render(request,"globe/index.html",context=context)
@csrf_exempt
def get_globe_contents(request):
"""
Returns the JSON data used to populate the globe.
"""
context = {}
context['globe'] = [["1990",[11,79,0.7,10,79,1.3,10,79,1.1,10,79,1.2,10,79,1.4,10,78,1.4,8,78,0.9,9,78,0.8,10,77,1.2]],["1995",[11,79,0.9,10,79,1.2,10,79,1,10,79,1.2,10,79,1.2,10,78,1.3,8,78,0.8,9,78,0.7,10,77,1.1]],["2000",[11,79,1.1,10,79,1,10,79,1.2,10,79,1.8,10,79,1.7,10,78,1.5,8,78,1,9,78,0.9,10,77,1.3]]]
return JsonResponse(context)
def render_internal_page(request):
"""
Renders internal.html and its required content.
"""
context={}
return render(request, "globe/internal.html", context=context)
|
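A hedged sketch of the URL configuration these views would need; the route paths and names are assumptions rather than values taken from the project.

```python
# Hedged sketch of a urls.py for the views above; paths and names are illustrative.
from django.urls import path

from . import views

urlpatterns = [
    path("", views.load_index_page, name="globe-index"),
    path("globe-data/", views.get_globe_contents, name="globe-data"),
    path("internal/", views.render_internal_page, name="globe-internal"),
]
```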
py | b4094ff8d077cdd2b652c8f88bd13e65278907a8 |
import pandas as pd
import numpy as np
from sklearn import cross_validation
from sklearn.preprocessing import Imputer
import sys
sys.path.append('/Users/jamie/xgboost/wrapper')
import xgboost as xgb
np.random.seed(31337)
blackElo = pd.read_csv('Features/BlackElo.txt', names=['blackElo'])
whiteElo = pd.read_csv('Features/WhiteElo.txt', names=['whiteElo'])
stockfish = pd.read_csv('Features/StockSummary.csv')
outOfBook = pd.read_csv('Features/OutOfBookMove.txt', names=['OutOfBook'])
result = pd.read_csv('Features/Results.txt', names=['Result'])
movesToKeep = ['Move'+str(x) for x in range(1,81)]
samplesToKeep = ['SamplePoint'+str(x) for x in range(1,21)]
stockfishFeatureNames = (['gameLength', 'gameDrift', 'gameOscillation', 'whiteGoodShare',
'blackGoodShare', 'whiteBlunders', 'blackBlunders']
+ movesToKeep + samplesToKeep)
bigX = stockfish[stockfishFeatureNames]
bigX['OutOfBook'] = outOfBook
bigX['Result'] = result['Result'].replace({'1-0': 1, '1/2-1/2': 0, '0-1': -1})
for colName in movesToKeep + samplesToKeep:
midCode = (bigX['Result']==0) & (np.isnan(bigX[colName]))
bigX.loc[midCode, colName] = 0
topCode = (bigX['Result']==1) & (np.isnan(bigX[colName]))
bigX.loc[topCode,colName] = 12400
bottomCode = (bigX['Result']==-1) & (np.isnan(bigX[colName]))
bigX.loc[bottomCode,colName] = -12400
fillWithMedian = Imputer(strategy='median', copy=False)
bigXfilled = fillWithMedian.fit_transform(bigX)
def ProjectElo(white, black):
x = ((black+white)/5000)**2
y = white - black
return x, y
def UnprojectElo(x, y):
blackPlusWhite = 5000 * np.sqrt(x)
white = 0.5*(blackPlusWhite + y)
black = 0.5*(blackPlusWhite - y)
return white, black
class MyModel:
def __init__(self, param_iter = 10):
self.random_state = np.random.RandomState(seed=31337)
self.param_iter = param_iter
def fit(self, X, white, black):
avg, diff = ProjectElo(white, black)
dtrain_avg = xgb.DMatrix(X, label=avg)
dtrain_diff = xgb.DMatrix(X, label=diff)
xgb_params = {'max_depth': 7, 'eta':0.05, 'silent':1}
n_rounds = 250
self.gbmAvg_ = xgb.train(xgb_params, dtrain_avg, n_rounds)
self.gbmDiff_ = xgb.train(xgb_params, dtrain_diff, n_rounds)
def predict(self, Xnew):
dtest = xgb.DMatrix(Xnew)
avgP = self.gbmAvg_.predict(dtest)
diffP = self.gbmDiff_.predict(dtest)
return UnprojectElo(avgP, diffP)
nFolds = 10
kf = cross_validation.KFold(n=25000, n_folds=nFolds, shuffle=True, random_state=0)
testErrors = []
for train_index, test_index in kf:
print('Fitting a fold.')
trainX = bigXfilled[train_index,]
testX = bigXfilled[test_index,]
trainWhite = whiteElo['whiteElo'].ix[train_index]
trainBlack = blackElo['blackElo'].ix[train_index]
testActualWhite = whiteElo['whiteElo'].ix[test_index]
testActualBlack = blackElo['blackElo'].ix[test_index]
model = MyModel()
model.fit(trainX, trainWhite, trainBlack)
testPredictedWhite, testPredictedBlack = model.predict(testX)
testErrors.append(float(np.mean(np.abs(np.concatenate(
[testActualWhite - testPredictedWhite,
testActualBlack - testPredictedBlack])))))
bigModel = MyModel()
bigModel.fit(bigXfilled[:25000], whiteElo['whiteElo'].iloc[:25000], blackElo['blackElo'].iloc[:25000])
predictedWhite, predictedBlack = bigModel.predict(bigXfilled[25000:])
print((np.mean(testErrors)))
prediction = pd.DataFrame({'Event': [i for i in range(25001,50001)],
'WhiteElo': np.round(predictedWhite,1),
'BlackElo': np.round(predictedBlack,1)} )
prediction.to_csv('predictions.csv', columns=['Event','WhiteElo','BlackElo'], index=False)
|
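A quick sanity check of the `ProjectElo`/`UnprojectElo` pair used above: projecting white and black ratings to (average-strength, difference) coordinates and back recovers the originals. The functions are restated so the snippet is self-contained; the ratings are illustrative.

```python
# Hedged sanity check of the Elo projection defined above; numbers are illustrative.
import numpy as np

def ProjectElo(white, black):
    x = ((black + white) / 5000) ** 2
    y = white - black
    return x, y

def UnprojectElo(x, y):
    black_plus_white = 5000 * np.sqrt(x)
    return 0.5 * (black_plus_white + y), 0.5 * (black_plus_white - y)

w, b = 2400.0, 2200.0
x, y = ProjectElo(w, b)            # x = (4600 / 5000) ** 2 = 0.8464, y = 200.0
w2, b2 = UnprojectElo(x, y)
assert abs(w2 - w) < 1e-6 and abs(b2 - b) < 1e-6
print(x, y, w2, b2)                # 0.8464 200.0 2400.0 2200.0
```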
py | b409511499994791ae96f21a98cbfe1b54042eba | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Aug 26 14:23:15 2028 GMT'))
self.assertEqual(p['notBefore'], asn1time('Aug 29 14:23:15 2018 GMT'))
self.assertEqual(p['serialNumber'], '98A7CF88C74A32ED')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0):
default |= ssl.OP_NO_COMPRESSION
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Pass a password longer than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
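# The callables below exercise the password-callback contract: for an
# encrypted key the callback must return a str, bytes or bytearray;
# a wrong password, an oversized one, a non-string return value or an
# exception from the callback produces the errors asserted further down,
# and the callback is not invoked at all when the key needs no password.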
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
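# The callback's default argument creates a cycle back to the context;
# the weakref below must go dead after gc.collect(), proving the cycle
# was broken rather than keeping the context alive.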
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
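# Shape of the pattern exercised below: on a non-blocking socket,
# connect_ex() may return EINPROGRESS/EWOULDBLOCK, then do_handshake()
# is retried in a loop, calling select() whenever SSLWantReadError or
# SSLWantWriteError is raised, until the handshake completes.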
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
class NetworkedBIOTests(unittest.TestCase):
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
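# This is the usual memory-BIO pattern: an SSLObject created with
# wrap_bio() never touches the network itself, so whenever OpenSSL
# signals WANT_READ/WANT_WRITE we ferry ciphertext between the BIOs
# and the real socket, roughly:
#     sock.sendall(outgoing.read())
#     incoming.write(sock.recv(32768))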
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_handshake(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(REMOTE_ROOT_CERT)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, REMOTE_HOST)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# self-signed.pythontest.net probably shuts down the TCP
# connection without sending a secure shutdown message, and
# this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
sock.close()
def test_read_write_data(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'GET / HTTP/1.0\r\n\r\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf[:5], b'HTTP/')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
sock.close()
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
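# STARTTLS convention used by these tests: the client sends the literal
# b"STARTTLS" (or b"ENDTLS"), the server replies b"OK\n" and then wraps
# (or unwraps) the connection, as implemented in run() below.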
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
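# readable() below drains application data that OpenSSL has already
# decrypted and buffered, because select() on the underlying socket
# cannot see it; pending() reports how many such bytes are waiting.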
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds;
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
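            # A non-blocking SSL write may itself need to *read* (for example
            # during a renegotiation), so either SSLWantWriteError or
            # SSLWantReadError can surface once the buffers fill up.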
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
        # Client: wait until the server is set up, then perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
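            # For TLS 1.0-1.2 the tls-unique binding is the first Finished
            # message of the handshake, which is 12 bytes long; hence the
            # fixed length checked below.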
self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
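        # OpenSSL may be built with compression disabled entirely, so None is
        # as acceptable here as an actual compression method name.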
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
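        # OpenSSL names ephemeral-DH key exchanges with an ADH, EDH or DHE
        # component, so any of those tokens identifies a DH cipher.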
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The certificate from other_context (CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
            if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
                r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
tests.append(NetworkedBIOTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
py | b409519709990eb982bfa0f4d530cab1eee8cd56 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BlobArgs', 'Blob']
@pulumi.input_type
class BlobArgs:
def __init__(__self__, *,
storage_account_name: pulumi.Input[str],
storage_container_name: pulumi.Input[str],
type: pulumi.Input[str],
access_tier: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
size: Optional[pulumi.Input[int]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_content: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Blob resource.
:param pulumi.Input[str] storage_account_name: Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_container_name: The name of the storage container in which this blob should be created.
:param pulumi.Input[str] type: The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
:param pulumi.Input[str] access_tier: The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
:param pulumi.Input[str] content_md5: The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
:param pulumi.Input[str] content_type: The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A map of custom blob metadata.
        :param pulumi.Input[str] name: The name of the storage blob. Must be unique within the storage container in which the blob is located.
:param pulumi.Input[int] parallelism: The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
:param pulumi.Input[int] size: Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
:param pulumi.Input[str] source_content: The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
:param pulumi.Input[str] source_uri: The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
"""
pulumi.set(__self__, "storage_account_name", storage_account_name)
pulumi.set(__self__, "storage_container_name", storage_container_name)
pulumi.set(__self__, "type", type)
if access_tier is not None:
pulumi.set(__self__, "access_tier", access_tier)
if content_md5 is not None:
pulumi.set(__self__, "content_md5", content_md5)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if parallelism is not None:
pulumi.set(__self__, "parallelism", parallelism)
if size is not None:
pulumi.set(__self__, "size", size)
if source is not None:
pulumi.set(__self__, "source", source)
if source_content is not None:
pulumi.set(__self__, "source_content", source_content)
if source_uri is not None:
pulumi.set(__self__, "source_uri", source_uri)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> pulumi.Input[str]:
"""
Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_account_name", value)
@property
@pulumi.getter(name="storageContainerName")
def storage_container_name(self) -> pulumi.Input[str]:
"""
The name of the storage container in which this blob should be created.
"""
return pulumi.get(self, "storage_container_name")
@storage_container_name.setter
def storage_container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_container_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> Optional[pulumi.Input[str]]:
"""
The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
"""
return pulumi.get(self, "access_tier")
@access_tier.setter
def access_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_tier", value)
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> Optional[pulumi.Input[str]]:
"""
The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "content_md5")
@content_md5.setter
def content_md5(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_md5", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of custom blob metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        The name of the storage blob. Must be unique within the storage container in which the blob is located.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parallelism(self) -> Optional[pulumi.Input[int]]:
"""
The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
"""
return pulumi.get(self, "parallelism")
@parallelism.setter
def parallelism(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "parallelism", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[int]]:
"""
Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:
"""
An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="sourceContent")
def source_content(self) -> Optional[pulumi.Input[str]]:
"""
The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
"""
return pulumi.get(self, "source_content")
@source_content.setter
def source_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_content", value)
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> Optional[pulumi.Input[str]]:
"""
The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
"""
return pulumi.get(self, "source_uri")
@source_uri.setter
def source_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_uri", value)
@pulumi.input_type
class _BlobState:
def __init__(__self__, *,
access_tier: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
size: Optional[pulumi.Input[int]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_content: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
storage_container_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Blob resources.
:param pulumi.Input[str] access_tier: The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
:param pulumi.Input[str] content_md5: The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
:param pulumi.Input[str] content_type: The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A map of custom blob metadata.
        :param pulumi.Input[str] name: The name of the storage blob. Must be unique within the storage container in which the blob is located.
:param pulumi.Input[int] parallelism: The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
:param pulumi.Input[int] size: Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
:param pulumi.Input[str] source_content: The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
:param pulumi.Input[str] source_uri: The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
:param pulumi.Input[str] storage_account_name: Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_container_name: The name of the storage container in which this blob should be created.
:param pulumi.Input[str] type: The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
:param pulumi.Input[str] url: The URL of the blob
"""
if access_tier is not None:
pulumi.set(__self__, "access_tier", access_tier)
if content_md5 is not None:
pulumi.set(__self__, "content_md5", content_md5)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if parallelism is not None:
pulumi.set(__self__, "parallelism", parallelism)
if size is not None:
pulumi.set(__self__, "size", size)
if source is not None:
pulumi.set(__self__, "source", source)
if source_content is not None:
pulumi.set(__self__, "source_content", source_content)
if source_uri is not None:
pulumi.set(__self__, "source_uri", source_uri)
if storage_account_name is not None:
pulumi.set(__self__, "storage_account_name", storage_account_name)
if storage_container_name is not None:
pulumi.set(__self__, "storage_container_name", storage_container_name)
if type is not None:
pulumi.set(__self__, "type", type)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> Optional[pulumi.Input[str]]:
"""
The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
"""
return pulumi.get(self, "access_tier")
@access_tier.setter
def access_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_tier", value)
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> Optional[pulumi.Input[str]]:
"""
The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "content_md5")
@content_md5.setter
def content_md5(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_md5", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of custom blob metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        The name of the storage blob. Must be unique within the storage container in which the blob is located.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parallelism(self) -> Optional[pulumi.Input[int]]:
"""
The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
"""
return pulumi.get(self, "parallelism")
@parallelism.setter
def parallelism(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "parallelism", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[int]]:
"""
Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:
"""
An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="sourceContent")
def source_content(self) -> Optional[pulumi.Input[str]]:
"""
The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
"""
return pulumi.get(self, "source_content")
@source_content.setter
def source_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_content", value)
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> Optional[pulumi.Input[str]]:
"""
The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
"""
return pulumi.get(self, "source_uri")
@source_uri.setter
def source_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_uri", value)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_name", value)
@property
@pulumi.getter(name="storageContainerName")
def storage_container_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the storage container in which this blob should be created.
"""
return pulumi.get(self, "storage_container_name")
@storage_container_name.setter
def storage_container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_container_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the blob
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
class Blob(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_tier: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
size: Optional[pulumi.Input[int]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_content: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
storage_container_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Blob within a Storage Container.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_blob = azure.storage.Blob("exampleBlob",
storage_account_name=example_account.name,
storage_container_name=example_container.name,
type="Block",
source=pulumi.FileAsset("some-local-file.zip"))
```
## Import
        Storage Blobs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:storage/blob:Blob blob1 https://example.blob.core.windows.net/container/blob.vhd
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_tier: The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
:param pulumi.Input[str] content_md5: The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
:param pulumi.Input[str] content_type: The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A map of custom blob metadata.
        :param pulumi.Input[str] name: The name of the storage blob. Must be unique within the storage container in which the blob is located.
:param pulumi.Input[int] parallelism: The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
:param pulumi.Input[int] size: Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
:param pulumi.Input[str] source_content: The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
:param pulumi.Input[str] source_uri: The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
:param pulumi.Input[str] storage_account_name: Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_container_name: The name of the storage container in which this blob should be created.
:param pulumi.Input[str] type: The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BlobArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Blob within a Storage Container.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_blob = azure.storage.Blob("exampleBlob",
storage_account_name=example_account.name,
storage_container_name=example_container.name,
type="Block",
source=pulumi.FileAsset("some-local-file.zip"))
```
## Import
        Storage Blobs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:storage/blob:Blob blob1 https://example.blob.core.windows.net/container/blob.vhd
```
:param str resource_name: The name of the resource.
:param BlobArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
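        # Dispatch between the two typed overloads above: either a BlobArgs
        # object was supplied, or the individual keyword arguments were.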
resource_args, opts = _utilities.get_resource_args_opts(BlobArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_tier: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
size: Optional[pulumi.Input[int]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_content: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
storage_container_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BlobArgs.__new__(BlobArgs)
__props__.__dict__["access_tier"] = access_tier
__props__.__dict__["content_md5"] = content_md5
__props__.__dict__["content_type"] = content_type
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["parallelism"] = parallelism
__props__.__dict__["size"] = size
__props__.__dict__["source"] = source
__props__.__dict__["source_content"] = source_content
__props__.__dict__["source_uri"] = source_uri
if storage_account_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_name'")
__props__.__dict__["storage_account_name"] = storage_account_name
if storage_container_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_container_name'")
__props__.__dict__["storage_container_name"] = storage_container_name
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["url"] = None
super(Blob, __self__).__init__(
'azure:storage/blob:Blob',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_tier: Optional[pulumi.Input[str]] = None,
content_md5: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
size: Optional[pulumi.Input[int]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_content: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
storage_container_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'Blob':
"""
Get an existing Blob resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_tier: The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
:param pulumi.Input[str] content_md5: The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
:param pulumi.Input[str] content_type: The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: A map of custom blob metadata.
        :param pulumi.Input[str] name: The name of the storage blob. Must be unique within the storage container in which the blob is located.
:param pulumi.Input[int] parallelism: The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
:param pulumi.Input[int] size: Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
:param pulumi.Input[str] source_content: The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
:param pulumi.Input[str] source_uri: The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
:param pulumi.Input[str] storage_account_name: Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
:param pulumi.Input[str] storage_container_name: The name of the storage container in which this blob should be created.
:param pulumi.Input[str] type: The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
:param pulumi.Input[str] url: The URL of the blob
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BlobState.__new__(_BlobState)
__props__.__dict__["access_tier"] = access_tier
__props__.__dict__["content_md5"] = content_md5
__props__.__dict__["content_type"] = content_type
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["parallelism"] = parallelism
__props__.__dict__["size"] = size
__props__.__dict__["source"] = source
__props__.__dict__["source_content"] = source_content
__props__.__dict__["source_uri"] = source_uri
__props__.__dict__["storage_account_name"] = storage_account_name
__props__.__dict__["storage_container_name"] = storage_container_name
__props__.__dict__["type"] = type
__props__.__dict__["url"] = url
return Blob(resource_name, opts=opts, __props__=__props__)
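# Illustrative use of the lookup helper above (a sketch; the resource name and the
# id placeholder are hypothetical, not values defined in this module):
#   existing = Blob.get("imported-blob", id="<existing blob resource id>")
#   pulumi.export("blob_url", existing.url)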
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> pulumi.Output[str]:
"""
The access tier of the storage blob. Possible values are `Archive`, `Cool` and `Hot`.
"""
return pulumi.get(self, "access_tier")
@property
@pulumi.getter(name="contentMd5")
def content_md5(self) -> pulumi.Output[Optional[str]]:
"""
The MD5 sum of the blob contents. Cannot be defined if `source_uri` is defined, or if blob type is Append or Page. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "content_md5")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[Optional[str]]:
"""
The content type of the storage blob. Cannot be defined if `source_uri` is defined. Defaults to `application/octet-stream`.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of custom blob metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the storage blob. Must be unique within the storage container in which the blob is located.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parallelism(self) -> pulumi.Output[Optional[int]]:
"""
The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
"""
return pulumi.get(self, "parallelism")
@property
@pulumi.getter
def size(self) -> pulumi.Output[Optional[int]]:
"""
Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[Union[pulumi.Asset, pulumi.Archive]]]:
"""
An absolute path to a file on the local system. This field cannot be specified for Append blobs and cannot be specified if `source_content` or `source_uri` is specified.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceContent")
def source_content(self) -> pulumi.Output[Optional[str]]:
"""
The content for this blob which should be defined inline. This field can only be specified for Block blobs and cannot be specified if `source` or `source_uri` is specified.
"""
return pulumi.get(self, "source_content")
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> pulumi.Output[Optional[str]]:
"""
The URI of an existing blob, or a file in the Azure File service, to use as the source contents
for the blob to be created. Changing this forces a new resource to be created. This field cannot be specified for Append blobs and cannot be specified if `source` or `source_content` is specified.
"""
return pulumi.get(self, "source_uri")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> pulumi.Output[str]:
"""
Specifies the storage account in which to create the storage container.
Changing this forces a new resource to be created.
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="storageContainerName")
def storage_container_name(self) -> pulumi.Output[str]:
"""
The name of the storage container in which this blob should be created.
"""
return pulumi.get(self, "storage_container_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the storage blob to be created. Possible values are `Append`, `Block` or `Page`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
"""
The URL of the blob
"""
return pulumi.get(self, "url")
|
py | b409522b28c0d229debaa0a296fca064aee63f3a | """
Management command for rubber.
"""
import os
import sys
from rubber.management.base import ESBaseCommand
class Command(ESBaseCommand):
def run(self, *args, **options):
if len(args) == 0:
self.print_error("Please provide at least one index.")
sys.exit(1)
for index in args:
config_path = os.path.join(
self.rubber_config.config_root,
'{0}.json'.format(index)
)
self.print_info(u"Using config file : {0}".format(config_path))
body = None
try:
with open(config_path, 'r') as f:
body = f.read()
except IOError:
self.print_error("Config file does not exist.")
continue
self.rubber_config.es.indices.create(index=index, body=body)
self.print_success(u"Index {0} created.".format(index))
|
py | b40953013da32e9a25382c2d04bcd9cbbf685c5d | from .test_dataset import CMD_CREATE_TEST_TABLE
import pytest
import pandas as pd
import numpy as np
import os
from ..dataset import sql_dataset
from .gen_rand_data import rand_df
CMD_DROP_TEST_TABLE_IF_EXISTS = "IF OBJECT_ID('test_table', 'U') IS NOT NULL DROP TABLE test_table;"
CMD_CREATE_TRUNCATED_TEST_TABLE = """
CREATE TABLE test_table (
[dt] datetime NULL,
[uid] nvarchar(10) NULL,
[name] nvarchar(10) NULL,
[empty_col] nvarchar(100) NULL,
[float] decimal(22,3) NULL,
[float_k] decimal(22,3) NULL,
[float_m] decimal(22,13) NULL,
[float_b] decimal(22,9) NULL,
[float_na] decimal(22,3) NULL,
[bit] bit NULL,
[bit_na] bit NULL,
[tinyint] tinyint NULL,
[tinyint_na] tinyint NULL,
[smallint] smallint NULL,
[smallint_na] smallint NULL,
[int] int NULL,
[int_na] int NULL,
[bigint] bigint NULL,
[bigint_na] bigint NULL,
[bool] bit NULL,
[bool_na] bit NULL,
[empty_str_col] nvarchar(100) NULL
);
"""
def cleanup_test_data_csv():
try:
os.remove('./tests/test_data.csv')
except:
pass
def cleanup_test_data_copy_csv():
try:
os.remove('./tests/test_data_copy.csv')
except:
pass
@pytest.fixture(scope='session')
def gen_test_csv(request):
df = rand_df(100000)
df.to_csv('./tests/test_data.csv', encoding='utf-8-sig', index=False)
request.addfinalizer(cleanup_test_data_csv)
def test_read_upload_query_bcp(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.upload(mode='overwrite_table', bcp=True, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.upload(mode='overwrite_data', bcp=True, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_bcp_truncate(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
# create a table too short to test upload(truncate=True/False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.send_cmd(CMD_CREATE_TRUNCATED_TEST_TABLE)
with pytest.raises(ValueError):
# should raise errors because it won't fit
sd.upload(bcp=True, truncate=False, verbose=verbose, mode='overwrite_data')
sd.upload(bcp=True, truncate=True, verbose=verbose, mode='overwrite_data')
df_queried = sd.query().data
# truncate df_orig accordingly for equality assertion
df_orig['uid'] = df_orig['uid'].str[:10]
df_orig['name'] = df_orig['name'].str[:10]
df_orig['float'] = df_orig['float'].round(3)
df_orig['float_k'] = df_orig['float_k'].round(3)
df_orig['float_na'] = df_orig['float_na'].round(3)
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_pyodbc(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.upload(mode='overwrite_table', bcp=False, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.upload(mode='overwrite_data', bcp=False, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_pyodbc_truncate(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
# create a table too short to test upload(truncate=True/False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.send_cmd(CMD_CREATE_TRUNCATED_TEST_TABLE)
with pytest.raises(ValueError):
# should raise errors because it won't fit
sd.upload(bcp=False, truncate=False, verbose=verbose, mode='overwrite_data')
sd.upload(bcp=False, truncate=True, verbose=verbose, mode='overwrite_data')
df_queried = sd.query().data
# truncate df_orig accordingly for equality assertion
df_orig['uid'] = df_orig['uid'].str[:10]
df_orig['name'] = df_orig['name'].str[:10]
df_orig['float'] = df_orig['float'].round(3)
df_orig['float_k'] = df_orig['float_k'].round(3)
df_orig['float_na'] = df_orig['float_na'].round(3)
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_write_bcp(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/read_upload_query_write.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
cleanup_test_data_copy_csv()
sd.upload(mode='overwrite_table', bcp=True, verbose=verbose)
sd.query().write()
df_queried = pd.read_csv('./tests/test_data_copy.csv')
df_queried['dt'] = pd.to_datetime(df_queried['dt'])
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
cleanup_test_data_copy_csv()
sd.upload(mode='overwrite_data', bcp=True, verbose=verbose)
sd.query().write()
df_queried = pd.read_csv('./tests/test_data_copy.csv')
df_queried['dt'] = pd.to_datetime(df_queried['dt'])
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
cleanup_test_data_copy_csv()
def test_read_upload_query_write_pyodbc(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/read_upload_query_write.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
cleanup_test_data_copy_csv()
sd.upload(mode='overwrite_table', bcp=False, verbose=verbose)
sd.query().write()
df_queried = pd.read_csv('./tests/test_data_copy.csv')
df_queried['dt'] = pd.to_datetime(df_queried['dt'])
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
cleanup_test_data_copy_csv()
sd.upload(mode='overwrite_data', bcp=False, verbose=verbose)
sd.query().write()
df_queried = pd.read_csv('./tests/test_data_copy.csv')
df_queried['dt'] = pd.to_datetime(df_queried['dt'])
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
cleanup_test_data_copy_csv()
|
py | b40953c6bcdd3ca9b6002598918d68a1729ca9fd | # avax-python : Python tools for the exploration of the Avalanche AVAX network.
#
# Documentation at https://crypto.bi
"""
Copyright © 2021 ojrdev
Support this Open Source project!
Donate to X-avax1qr6yzjykcjmeflztsgv6y88dl0xnlel3chs3r4
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# --#--#--
from avaxpython.snow.handlers import json_printer |
py | b4095528b4bf802772721c28dd82a7184ca5ad10 | # TODO : Actually add tests, this is to pass CI
def test_dumb():
expected_number = '1'
actual_number = str(1)
assert actual_number == expected_number
|
py | b409560bceaa8ed6e3c40c3d428407f27bb52915 | """ *******************************************************************************************************************
|
| Name : import_new_groups.py
| Description : Mass creation of groups in the RiskSense platform.
| Project : risksense_tools
| Copyright : (c) RiskSense, Inc.
| License : Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
|
******************************************************************************************************************* """
import os
import sys
import toml
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'lib'))
import risksense_api as rsapi
class ImportNewGroups:
""" ImportNewGroups class """
def __init__(self):
""" Main body of script """
conf_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'conf', 'config.toml')
config = self.read_config_file(conf_file)
self.rs_platform = config['platform_url']
self.api_key = config['api_key']
self.group_text_filename = config['group_text_filename']
# Instantiate RiskSenseApi
self.rs = rsapi.RiskSenseApi(self.rs_platform, self.api_key)
# Get client id, or validate supplied client ID
if 'client_id' not in config:
client_id = self.get_client_id()
else:
client_id = config['client_id']
try:
self.validate_client_id(client_id)
except ValueError:
print(f"Unable to validate that you belong to client ID {client_id}.")
print("Exiting.")
sys.exit(1)
# Set default client ID
self.rs.set_default_client_id(client_id)
# Read the text file; returns a list of group names, one per line.
print("Reading text file...")
file_lines = self.read_text_file(self.group_text_filename)
# Submit group creation for each group name read from the text file
for group_name in file_lines:
try:
group_id = self.create_group(group_name)
print(f"New group \"{group_name}\" created as group ID {group_id}")
except (rsapi.MaxRetryError, rsapi.StatusCodeError, rsapi.InsufficientPrivileges,
rsapi.UserUnauthorized, ValueError) as ex:
print(f"There was an error trying to create new group \"{group_name}\".")
print(ex)
print()
print("Done. Exiting.")
@staticmethod
def read_config_file(filename):
"""
Reads a TOML-formatted configuration file.
:param filename: Path to the TOML-formatted file to be read.
:type filename: str
:return: Values contained in config file.
:rtype: dict
"""
try:
data = toml.loads(open(filename).read())
return data
except (Exception, FileNotFoundError, toml.TomlDecodeError) as ex:
print("Error reading configuration file.")
print(ex)
print()
input("Please press ENTER to close.")
exit(1)
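# Example conf/config.toml (illustrative; keys inferred from the reads above, all
# values are placeholders):
#   platform_url = "<RiskSense platform URL>"
#   api_key = "<API key>"
#   group_text_filename = "groups.txt"
#   client_id = 12345  # optional; detected automatically when omitted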
def validate_client_id(self, submitted_client_id):
"""
Validate the supplied CLIENT_ID variable
:param submitted_client_id: Client ID to validate
:type submitted_client_id: int
:raises: ValueError
"""
my_client_ids = []
for client in self.rs.my_clients:
my_client_ids.append(client['id'])
if submitted_client_id in my_client_ids:
pass
else:
raise ValueError("User not assigned to the submitted client ID.")
def get_client_id(self):
"""
Get the client ID associated with this API key.
:return: Client ID
:rtype: int
"""
return self.rs.my_clients[0]['id']
@staticmethod
def read_text_file(filename):
"""
Read the text file, and return a list of lines.
:param filename: Path to text file to be read.
:type filename: str
:return: The data contained in the text file, in list format.
:rtype: list
"""
return_data = []
input_file = open(filename, 'r')
all_lines = input_file.readlines()
for line in all_lines:
return_data.append(line.strip())
return return_data
def create_group(self, group_name):
"""
Create a new group.
:param group_name: Group Name
:type group_name: str
:return: Group ID
:rtype: int
:raises: ValueError
:raises: RequestFailed
"""
if group_name == "" or group_name is None:
raise ValueError("Group Name required.")
try:
group_id = self.rs.groups.create(group_name)
except (rsapi.MaxRetryError, rsapi.StatusCodeError, rsapi.UserUnauthorized, rsapi.InsufficientPrivileges, Exception):
raise
return group_id
#
# Execute the script
if __name__ == "__main__":
try:
ImportNewGroups()
except KeyboardInterrupt:
print()
print("KeyboardInterrupt detected. Exiting...")
print()
sys.exit(0)
"""
Copyright 2020 RiskSense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
py | b4095749d3c9381dc19177eaf78e7397dec20ba7 | import multiprocessing
import time
import math
import random
def heavyload(looping_time):
t1 = time.time()
num = 1.99
no_iter = 0
while(time.time() - t1 < looping_time):
for _ in range(1000):
num = (num*num)
num = num/0.25
num = math.sqrt(num)
num = num/2.0
no_iter += 1000
print("after % iter num is % instead of 2" % (no_iter, num))
return no_iter
def lightload(iter_ind, delay):
time.sleep(delay)
print("At iter % " % (iter_ind))
return iter_ind
"""
If we should _not_ re-calclate, this function randomly switches to that we
_should_ re-calculate
"""
def change_state(should_recalculate):
if should_recalculate.value == 0:
random.seed()
if random.randint(0, 101) < 20:
print("The world is a-changin!\n")
should_recalculate.value = 1
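# Note: random.randint(0, 101) is inclusive of both endpoints, so the branch above
# fires with probability 20/102 (roughly 20%) on each call while the flag is 0.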
def heavyload_loop(looping_time, sim_time, should_recalculate):
t1 = time.time()
while(time.time() - t1 < sim_time):
print(time.time() - t1)
iter_start = time.time()  # time each pass separately so the sim_time cutoff above still triggers
if should_recalculate.value > 0:
heavyload(looping_time)
should_recalculate.value = 0
print("Done re-calculating at ", time.time() - iter_start)
time.sleep(1.0)
def lightload_loop(delay, sim_time, should_recalculate):
t1 = time.time()
while(time.time() - t1 < sim_time):
if should_recalculate.value > 0:
print("Sleep")
else:
print("Walk")
time.sleep(delay)
def recalculate_loop(delay, sim_time, should_recalculate):
t1 = time.time()
while(time.time() - t1 < sim_time):
change_state(should_recalculate)
time.sleep(delay)
if __name__ == "__main__":
should_recalculate = multiprocessing.Value('i')
should_recalculate.value = 1 # Start with the need for re-calculation
heavyload_t = 2.0
recalc_sample_time = 2.5
inner_loop_sample_time = 0.3
tot_sim_time = 10.0
# creating new process
inner_p = multiprocessing.Process(target=lightload_loop, args=(inner_loop_sample_time, tot_sim_time, should_recalculate))
outer_p = multiprocessing.Process(target=heavyload_loop, args=(heavyload_t, tot_sim_time, should_recalculate))
env_p = multiprocessing.Process(target=recalculate_loop, args=(recalc_sample_time, tot_sim_time, should_recalculate))
# starting process
inner_p.start()
outer_p.start()
env_p.start()
inner_p.join()
outer_p.join()
env_p.join()
# print result array
print("Done!")
|
py | b409575f7913d1f49c804f5d1cc5ad9fb28cdfff | import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
class VisualWakeWords(COCO):
def __init__(self, *args):
super(VisualWakeWords, self).__init__(*args)
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
ax = plt.gca()
ax.set_autoscale_on(False)
for ann in anns:
c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
if ann['category_id'] == 1:
[x, y, width, height] = ann['bbox']
rect = patches.Rectangle((x, y), width, height, edgecolor=c, facecolor=c, linewidth=2, alpha=0.4)
ax.add_patch(rect)
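# Illustrative usage (a sketch; the annotation file path and image_id are
# hypothetical, and getAnnIds/loadAnns come from the pycocotools COCO base class):
#   vww = VisualWakeWords('annotations/instances_visualwakewords_val.json')
#   anns = vww.loadAnns(vww.getAnnIds(imgIds=[image_id]))
#   vww.showAnns(anns)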
def download(self, *args):
raise AttributeError("Cannot download Visual Wake Words Dataset. "
"See instructions on github.com/Mxbonn/visualwakewords to create"
"the Visual Wake Words Dataset.")
def loadRes(self, resFile):
raise AttributeError("Method not implemented for the Visual Wake Words Dataset.")
def annToRLE(self, ann):
raise AttributeError("Method not implemented for the Visual Wake Words Dataset.")
def annToMask(self, ann):
raise AttributeError("Method not implemented for the Visual Wake Words Dataset.")
|
py | b40957d90b69e471a8d20f8024690039d8bd300e | #!/usr/bin/env python
# Copyright (c) 2012 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Copied from Chrome's src/tools/valgrind/memcheck/PRESUBMIT.py
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import os
import re
import sys
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
# Add the path to the Chrome valgrind dir to the import path:
tools_vg_path = os.path.join(input_api.PresubmitLocalPath(), '..', '..', '..',
'tools', 'valgrind')
sys.path.append(tools_vg_path)
import suppressions
sup_regex = re.compile('suppressions.*\.txt$')
suppressions = {}
errors = []
check_for_memcheck = False
# skip_next_line has 3 possible values:
# - False: don't skip the next line.
# - 'skip_suppression_name': the next line is a suppression name, skip.
# - 'skip_param': the next line is a system call parameter error, skip.
skip_next_line = False
for f in filter(lambda x: sup_regex.search(x.LocalPath()),
input_api.AffectedFiles()):
for line, line_num in zip(f.NewContents(),
xrange(1, len(f.NewContents()) + 1)):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
if skip_next_line == 'skip_suppression_name':
if 'insert_a_suppression_name_here' in line:
errors.append('"insert_a_suppression_name_here" is not a valid '
'suppression name')
if suppressions.has_key(line):
if f.LocalPath() == suppressions[line][1]:
errors.append('suppression with name "%s" at %s line %s '
'has already been defined at line %s' %
(line, f.LocalPath(), line_num,
suppressions[line][1]))
else:
errors.append('suppression with name "%s" at %s line %s '
'has already been defined at %s line %s' %
(line, f.LocalPath(), line_num,
suppressions[line][0], suppressions[line][1]))
else:
suppressions[line] = (f, line_num)
check_for_memcheck = True
skip_next_line = False
continue
if check_for_memcheck:
if not line.startswith('Memcheck:'):
errors.append('"%s" should be "Memcheck:..." in %s line %s' %
(line, f.LocalPath(), line_num))
check_for_memcheck = False
if line == '{':
skip_next_line = 'skip_suppression_name'
continue
if line == "Memcheck:Param":
skip_next_line = 'skip_param'
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line.startswith('Memcheck:') or line == '}' or
line == '...'):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTrySlaves():
# We don't have any memcheck slaves yet, so there's no use for this method.
# When we have, the slave name(s) should be put into this list.
return []
|
py | b40957e9e10dde75adde88df6db8384002c08e35 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['EventHubConnection']
class EventHubConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
data_format: Optional[pulumi.Input[Union[str, 'DataFormat']]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_hub_connection_name: Optional[pulumi.Input[str]] = None,
event_hub_resource_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
mapping_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Class representing an event hub connection.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_name: The name of the Kusto cluster.
:param pulumi.Input[str] consumer_group: The event hub consumer group.
:param pulumi.Input[Union[str, 'DataFormat']] data_format: The data format of the message. Optionally the data format can be added to each message.
:param pulumi.Input[str] database_name: The name of the database in the Kusto cluster.
:param pulumi.Input[str] event_hub_connection_name: The name of the event hub connection.
:param pulumi.Input[str] event_hub_resource_id: The resource ID of the event hub to be used to create a data connection.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] mapping_rule_name: The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster.
:param pulumi.Input[str] table_name: The table where the data should be ingested. Optionally the table information can be added to each message.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__['cluster_name'] = cluster_name
if consumer_group is None and not opts.urn:
raise TypeError("Missing required property 'consumer_group'")
__props__['consumer_group'] = consumer_group
__props__['data_format'] = data_format
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__['database_name'] = database_name
__props__['event_hub_connection_name'] = event_hub_connection_name
if event_hub_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'event_hub_resource_id'")
__props__['event_hub_resource_id'] = event_hub_resource_id
__props__['location'] = location
__props__['mapping_rule_name'] = mapping_rule_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['table_name'] = table_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:kusto:EventHubConnection"), pulumi.Alias(type_="azure-nextgen:kusto/v20170907privatepreview:EventHubConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EventHubConnection, __self__).__init__(
'azure-nextgen:kusto/v20180907preview:EventHubConnection',
resource_name,
__props__,
opts)
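# Illustrative instantiation (a sketch; all names and IDs below are placeholders,
# not values defined in this module):
#   conn = EventHubConnection("example-connection",
#       cluster_name="<kusto cluster>",
#       database_name="<database>",
#       consumer_group="$Default",
#       event_hub_resource_id="<event hub resource id>",
#       resource_group_name="<resource group>")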
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EventHubConnection':
"""
Get an existing EventHubConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return EventHubConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="consumerGroup")
def consumer_group(self) -> pulumi.Output[str]:
"""
The event hub consumer group.
"""
return pulumi.get(self, "consumer_group")
@property
@pulumi.getter(name="dataFormat")
def data_format(self) -> pulumi.Output[Optional[str]]:
"""
The data format of the message. Optionally the data format can be added to each message.
"""
return pulumi.get(self, "data_format")
@property
@pulumi.getter(name="eventHubResourceId")
def event_hub_resource_id(self) -> pulumi.Output[str]:
"""
The resource ID of the event hub to be used to create a data connection.
"""
return pulumi.get(self, "event_hub_resource_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="mappingRuleName")
def mapping_rule_name(self) -> pulumi.Output[Optional[str]]:
"""
The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message.
"""
return pulumi.get(self, "mapping_rule_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tableName")
def table_name(self) -> pulumi.Output[Optional[str]]:
"""
The table where the data should be ingested. Optionally the table information can be added to each message.
"""
return pulumi.get(self, "table_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b40958be0812a905a9325089d443ca764648040c | #!/usr/bin/env python
"""
A dcmtk style movescu application
Used for
"""
import argparse
import logging
import os
import socket
import sys
import time
from pydicom.dataset import Dataset, FileDataset
from pydicom.uid import ExplicitVRLittleEndian, ImplicitVRLittleEndian, \
ExplicitVRBigEndian
from pynetdicom import AE, StorageSOPClassList, QueryRetrieveSOPClassList
from pynetdicom.primitives import SCP_SCU_RoleSelectionNegotiation
logger = logging.Logger('movescu')
stream_logger = logging.StreamHandler()
formatter = logging.Formatter('%(levelname).1s: %(message)s')
stream_logger.setFormatter(formatter)
logger.addHandler(stream_logger)
logger.setLevel(logging.ERROR)
def _setup_argparser():
# Description
parser = argparse.ArgumentParser(
description="The movescu application implements a Service Class User "
"(SCU) for the Query/Retrieve (QR) Service Class and a SCP "
" for the Storage Service Class. movescu "
"supports retrieve functionality using the C-MOVE "
"message. It sends query keys to an SCP and waits for a "
"response. It will accept associations for the purpose of "
"receiving images sent as a result of the C-MOVE request. "
"The application can be used to test SCPs of the "
"QR Service Classes. movescu can initiate the transfer of "
"images to a third party or can retrieve images to itself "
"(note: the use of the term 'move' is a misnomer, the "
"C-MOVE operation performs an image copy only)",
usage="movescu [options] peer port dcmfile-in")
# Parameters
req_opts = parser.add_argument_group('Parameters')
req_opts.add_argument("peer", help="hostname of DICOM peer", type=str)
req_opts.add_argument("port", help="TCP/IP port number of peer", type=int)
req_opts.add_argument("dcmfile_in",
metavar="dcmfile-in",
help="DICOM query file(s)",
type=str)
# General Options
gen_opts = parser.add_argument_group('General Options')
gen_opts.add_argument("--version",
help="print version information and exit",
action="store_true")
gen_opts.add_argument("--arguments",
help="print expanded command line arguments",
action="store_true")
gen_opts.add_argument("-q", "--quiet",
help="quiet mode, print no warnings and errors",
action="store_true")
gen_opts.add_argument("-v", "--verbose",
help="verbose mode, print processing details",
action="store_true")
gen_opts.add_argument("-d", "--debug",
help="debug mode, print debug information",
action="store_true")
gen_opts.add_argument("-ll", "--log-level", metavar='[l]',
help="use level l for the logger (fatal, error, warn, "
"info, debug, trace)",
type=str,
choices=['fatal', 'error', 'warn',
'info', 'debug', 'trace'])
gen_opts.add_argument("-lc", "--log-config", metavar='[f]',
help="use config file f for the logger",
type=str)
# Network Options
net_opts = parser.add_argument_group('Network Options')
net_opts.add_argument("-aet", "--calling-aet", metavar='[a]etitle',
help="set my calling AE title (default: MOVESCU)",
type=str,
default='MOVESCU')
net_opts.add_argument("-aec", "--called-aet", metavar='[a]etitle',
help="set called AE title of peer (default: ANY-SCP)",
type=str,
default='ANY-SCP')
net_opts.add_argument("-aem", "--move-aet", metavar='[a]etitle',
help="set move destination AE title (default: "
"MOVESCP)",
type=str,
default='MOVESCP')
# Query information model choices
qr_group = parser.add_argument_group('Query Information Model Options')
qr_model = qr_group.add_mutually_exclusive_group()
qr_model.add_argument("-P", "--patient",
help="use patient root information model (default)",
action="store_true",
)
qr_model.add_argument("-S", "--study",
help="use study root information model",
action="store_true")
qr_model.add_argument("-O", "--psonly",
help="use patient/study only information model",
action="store_true")
return parser.parse_args()
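# Example invocation (a sketch; host, port, AE titles and query file are placeholders):
#   python movescu.py 127.0.0.1 11112 query.dcm -aem STORESCP --study -v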
args = _setup_argparser()
if args.verbose:
logger.setLevel(logging.INFO)
pynetdicom_logger = logging.getLogger('pynetdicom')
pynetdicom_logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
pynetdicom_logger = logging.getLogger('pynetdicom')
pynetdicom_logger.setLevel(logging.DEBUG)
logger.debug('$movescu.py v%s %s $' %('0.1.0', '2016-03-15'))
logger.debug('')
# Create application entity
# Binding to port 0 lets the OS pick an available port
ae = AE(ae_title=args.calling_aet,
port=0,
scu_sop_class=QueryRetrieveSOPClassList,
scp_sop_class=StorageSOPClassList,
transfer_syntax=[ExplicitVRLittleEndian])
# Set the extended negotiation SCP/SCU role selection to allow us to receive
# C-STORE requests for the supported SOP classes
ext_neg = []
for context in ae.presentation_contexts_scu:
tmp = SCP_SCU_RoleSelectionNegotiation()
tmp.sop_class_uid = context.AbstractSyntax
tmp.scu_role = False
tmp.scp_role = True
ext_neg.append(tmp)
# Request association with remote
assoc = ae.associate(args.peer, args.port, args.called_aet, ext_neg=ext_neg)
# Create query dataset
d = Dataset()
d.PatientsName = '*'
d.QueryRetrieveLevel = "PATIENT"
if args.patient:
query_model = 'P'
elif args.study:
query_model = 'S'
elif args.psonly:
query_model = 'O'
else:
query_model = 'P'
def on_c_store(sop_class, dataset):
"""
Function replacing ApplicationEntity.on_store(). Called when a dataset is
received following a C-STORE. Write the received dataset to file
Parameters
----------
sop_class - pydicom.SOPclass.StorageServiceClass
The StorageServiceClass representing the object
dataset - pydicom.Dataset
The DICOM dataset sent via the C-STORE
Returns
-------
status
A valid return status, see the StorageServiceClass for the
available statuses
"""
filename = 'CT.%s' %dataset.SOPInstanceUID
logger.info('Storing DICOM file: %s' %filename)
if os.path.exists(filename):
logger.warning('DICOM file already exists, overwriting')
#logger.debug("pydicom::Dataset()")
meta = Dataset()
meta.MediaStorageSOPClassUID = dataset.SOPClassUID
meta.MediaStorageSOPInstanceUID = '1.2.3'
meta.ImplementationClassUID = '1.2.3.4'
#logger.debug("pydicom::FileDataset()")
ds = FileDataset(filename, {}, file_meta=meta, preamble=b"\0" * 128)
ds.update(dataset)
ds.is_little_endian = True
ds.is_implicit_VR = True
#logger.debug("pydicom::save_as()")
ds.save_as(filename)
return sop_class.Success
ae.on_c_store = on_c_store
# Send query
if assoc.is_established:
if args.move_aet:
response = assoc.send_c_move(d, args.move_aet, query_model=query_model)
else:
response = assoc.send_c_move(d, args.calling_aet, query_model=query_model)
time.sleep(1)
for (status, d) in response:
pass
assoc.release()
|
py | b4095910c4b654b651e116c00c1995dfc062ac96 | items = [x for x in input("input a seq of words seperated by comma: ").split(',')]
items.sort()
print(','.join(items)) |
py | b4095992f3a67c7a12a457d3903ad831ac96d6bf | '''Trains a simple convnet on the MNIST dataset.
Gets to 98.96% test accuracy after 12 epochs
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from layers import SE
batch_size = 128
num_classes = 10
epochs = 12
ratio = 4
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(SE(ratio))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(SE(ratio))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
py | b40959edfb837a91491d58e2af955814f6b312e9 | # Copyright 2016-2018 Iowa State University Research Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from delamo.api import DelamoModeler
from delamo.api import Layer
from delamo.api import bond_layers
from delamo.api import SimpleCoordSys
from delamo import process
from delamo.layer import LayerMold
import os
# Front matter
# ------------
# Initialize the DeLaMo model
DM=DelamoModeler.Initialize(globals(),
pointtolerancefactor=100.0,
normaltolerance=100e-4,
GapWidth=0)
# This script then generates both a CAD file and a Python script.
# The Python script can be run from Abaqus. It includes the
# initialization script referenced above, and also opens
# the CAD file and builds the model.
# The name of the script file to generate and
# the name of the CAD file to write are returned
# by process.output_filenames()
# The first parameter to output_filenames
# should always match the name of the original script
# with the ".py" stripped
# In manually generated scripts, always specify phase
# to be "ORIGINAL"
(script_to_generate,
cad_file_path_from_script,
layer_boundary_template) = process.output_filenames("03_CohesiveLayer",phase="ORIGINAL")
# When writing a DeLaMo script, you start by creating a
# finite element initialization script. This is a
# Python script for ABAQUS that defines your various parameters
# -- material properties, etc. as Python variables.
# In this case they are stored in the "abqparams_CFRP.py" file
DM.abaqus_init_script("abqparams_CFRP.py",globals())
DM.abaqus_init_script("abqparams_CFRP_cohesive.py",globals())
# The above call automatically inserts wrapped copies of variables
# defined in those scripts into the global variable space. Then you
# can reference those variables in this script
# (you can have as many init scripts as you like)
# The Delamo model contains generates several sets of instructions
# for different phases of the finite element modeling process:
# DM.initinstrs (initialization)
# DM.assemblyinstrs (model assembly)
# DM.bcinstrs (boundary conditions)
# DM.meshinstrs (meshing)
# All methods called from those variables will go generally be executed
# in the assemblyinstrs pool unless otherwise overridden. You can
# use e.g. DM.meshinstrs.rewrapobj() to get a reference to
# one of these variables that will execute in an alternate context.
#
# For example,
LaminateAssemblyMeshing=DM.meshinstrs.rewrapobj(LaminateAssembly)
# Creates a reference to the LaminateAssembly, for which method calls
# execute in the meshing context
# Basic parameters
# Set layer thickness we are planning on using
thickness = 0.199
# Set cohesive layer thickness
cohesivethickness = 0.001
# Load a NURBS mold surface from a file
Mold = LayerMold.FromFile(os.path.join("..","data","CurvedMold1.STEP"))
# Define a coordinate system
# This example defines +x direction along 0 deg. fibers,
# +y direction across 0 deg fibers, equivalent to
# the default (when coordsys is not specified)
coordsys=SimpleCoordSys((1.0,0.0,0.0),(0.0,1.0,0.0))
# Create 1st layer by moving the distance specified by thickness
# in the OFFSET_DIRECTION
layer1 = Layer.CreateFromMold(DM,Mold,"OFFSET",thickness,"Layer_1",LaminaSection,0,coordsys=coordsys)
# Once any breaks, etc. of a given layer are complete, it must be
# finalized.
layer1.Finalize(DM)
# The MeshSimple method is a shortcut over the underlying ABAQUS routines
# It loops over each part in the layer and calls setElementType() with
# the specified MeshElemTypes, setMeshControls() with the given shape
# and technique, and seedPart() with the given mesh size, deviation factor,
# and minsizefactor. and refines the mesh near any given refined_edges
# Note that ABAQUS constants must be referenced as part of abqC
# rather than used directly
layer1.MeshSimple(MeshElemTypes,meshsize,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# Create and add point marker for fixed faced boundary condition
# There is a surface at y=-25 mm from z= 0...0.2 mm
# This point identifies it
FixedPoint=[-40.0,-50.0,0.1]
# Define a fixed boundary condition based on that point.
# EncastreBC is an ABAQUS function that was found by
# using the ABAQUS/CAE interface and then looking at the
# replay (.rpy) file.
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer1.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 2nd layer
layer2 = Layer.CreateFromMold(DM,layer1.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_2", LaminaSection,-45,coordsys=coordsys)
layer2.Finalize(DM)
layer2.MeshSimple(MeshElemTypes,meshsize/1.8,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# Bond layers 1 and 2. With no other parameters, the layers are attached
# with a TIE boundary condition
bond_layers(DM,layer1, layer2)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer2.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create cohesive layer
layer23cohesive = Layer.CreateFromMold(DM,layer2.gk_layer.OffsetMold(),"OFFSET",cohesivethickness,"Layer23cohesive", CohesiveSection,0) # Orientation doesn't really matter since we have made the cohesive layer isotropic
#layer23cohesive.Finalize(DM) # bond_layers() will do the finalize step on the cohesive layer so we don't have to
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=cohesivethickness
# Create 3rd layer
layer3 = Layer.CreateFromMold(DM,layer23cohesive.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_3",LaminaSection,45,coordsys=coordsys)
layer3.Finalize(DM)
layer3.MeshSimple(MeshElemTypes,meshsize/1.8,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# The line below performs a bonding operation with a delamination
# and a contact zone inside the delamination surrounded by a
# cohesive zone into which the delamination may grow
bond_layers(DM,layer2, layer3,
cohesive_layer=layer23cohesive,
defaultBC="COHESIVE_LAYER",
delaminationlist=[os.path.join("..","data","nasa-delam12-1.csv")],
ContactInteraction=ContactInteraction)
layer23cohesive.MeshCohesive(meshsize/1.8,abqC.HEX_DOMINATED)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer3.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 4th layer
layer4 = Layer.CreateFromMold(DM,layer3.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_4",LaminaSection,90,coordsys=coordsys)
layer4.Finalize(DM)
layer4.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer3, layer4)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer4.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 5th layer over the layer 4 or the stiffener contour, if present
# ... for we just tell it to follow the layer 4 contour, which
# the stiffener automagically expanded
layer5 = Layer.CreateFromMold(DM,layer4.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_5",LaminaSection,90,coordsys=coordsys)
layer5.Finalize(DM)
layer5.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer4, layer5)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FixedPoint[1]-=.07 # accommodate outward shift as we go up
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer5.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 6th layer
layer6 = Layer.CreateFromMold(DM,layer5.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_6",LaminaSection,45,coordsys=coordsys)
layer6.Finalize(DM)
layer6.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer5, layer6)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer6.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 7th layer
layer7 = Layer.CreateFromMold(DM,layer6.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_7",LaminaSection,-45,coordsys=coordsys)
layer7.Finalize(DM)
layer7.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer6, layer7)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer7.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Create 8th layer
layer8 = Layer.CreateFromMold(DM,layer7.gk_layer.OffsetMold(),"OFFSET",thickness,"Layer_8",LaminaSection,0,coordsys=coordsys)
layer8.Finalize(DM)
layer8.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer7, layer8)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer8.singlepart.GetInstanceFaceRegion(FixedPoint,0.07))
# Can define a "Surface" that is visible in the Abaqus output database
# This is a direct ABAQUS call on the part object
# within layer1 (assumes layer1 is not split due to fiber/matrix breakage)
layer1.singlepart.fe_part.Surface(name="ForceSurface",
side1Faces=layer1.singlepart.GetPartFace((-49.0,-49.0,thickness*0),0.1))
ForceVector=[ 0.0, 0.0, -5e-2 ] # Units of MPa
# Call ABAQUS SurfaceTraction method
# Again, this came from looking at ABAQUS replay (.rpy) output
# Observe again that all ABAQUS symbolic constants need the "abqC"
# prefix.
FEModel.SurfaceTraction(name="SurfaceTraction_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer1.singlepart.GetInstanceFaceRegionSurface((-49.0,-49.0,thickness*0.0),0.1),
distributionType=abqC.UNIFORM,
field='',
localCsys=None,
traction=abqC.GENERAL,
follower=abqC.OFF,
resultant=abqC.ON,
magnitude=np.linalg.norm(ForceVector),
directionVector=((0.0,0.0,0.0),tuple(ForceVector/np.linalg.norm(ForceVector))),
amplitude=abqC.UNSET)
# You can have the job auto-start when the Python script is run
#DM.RunJob(BendingJob)
# Finalization generates the output script and CAD model.
DM.Finalize(script_to_generate,cad_file_path_from_script)
|
py | b4095a85f7f739f74fba6b0a1796cb0b81181679 | from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu, fsl
from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
from ...niworkflows.interfaces import NormalizeMotionParams
from ...niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from ...niworkflows.interfaces.itk import MCFLIRT2ITK
from ...niworkflows.interfaces.cbf_computation import (extractCBF,computeCBF
,scorescrubCBF,BASILCBF,refinemask,qccbf)
from ...niworkflows.interfaces.utility import KeySelect
import nibabel as nb
import numpy as np
import os,sys
from ...config import DEFAULT_MEMORY_MIN_GB
def init_cbf_compt_wf(mem_gb,metadata,aslcontext,pcasl,omp_nthreads, name='cbf_compt_wf'):
workflow = Workflow(name=name)
workflow.__desc__ = """\
The CBF was quantified from *preprocessed* ASL data using a relatively basic model
[@detre_perfusion,@alsop_recommended]. Because CBF is susceptible to artifacts due to low signal-to-noise ratio and sensitivity
to motion, the Structural Correlation based Outlier Rejection (SCORE) algorithm was applied to the CBF to
discard the few extreme outliers [@score_dolui]. Furthermore, the Structural Correlation with RobUst Bayesian (SCRUB)
algorithm was applied to the CBF by iteratively reweighting the CBF with structural tissue probability maps
[@scrub_dolui]. An alternate method of CBF computation is Bayesian Inference for Arterial Spin Labeling (BASIL)
as implemented in FSL, which is based on Bayesian inference principles [@chappell_basil].
BASIL computes the CBF from ASL incorporating the natural variability of other model parameters and spatial regularization
of the estimated perfusion image. BASIL also includes correction for partial volume effects [@chappell_pvc].
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold', 'bold_mask','t1w_tpms','t1w_mask','t1_bold_xform','itk_bold_to_t1']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_cbf', 'out_mean','out_score','out_avgscore','out_scrub',
'out_scoreindex','out_cbfb','out_cbfpv']),
name='outputnode')
# convert tmps to bold_space
csf_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='csf_tfm', mem_gb=0.1)
wm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='wm_tfm', mem_gb=0.1)
gm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='gm_tfm', mem_gb=0.1)
labeltype=metadata['LabelingType']
if 'CASL' in labeltype:
pcasl=True
elif 'PASL' in labeltype:
pcasl=False
else:
print('unknown label type')
extractcbf = pe.Node(extractCBF(in_ASLcontext=aslcontext),mem_gb=0.2,run_without_submitting=True,name="extractcbf")
computecbf = pe.Node(computeCBF(in_metadata=metadata),mem_gb=0.2,
run_without_submitting=True,name="computecbf")
scorescrub= pe.Node(scorescrubCBF(in_thresh=0.7,in_wfun='huber'),
name='scorescrub',run_without_submitting=True,mem_gb=0.2)
basilcbf= pe.Node(BASILCBF(m0scale=metadata["M0"],
bolus=metadata["InitialPostLabelDelay"],m0tr=metadata['RepetitionTime'],pvc=True,
tis=np.add(metadata["InitialPostLabelDelay"],metadata["LabelingDuration"]),
pcasl=pcasl,out_basename=os.getcwd()),
name='basilcbf',run_without_submitting=True,mem_gb=0.2)
refinemaskj=pe.Node(refinemask(),mem_gb=0.2,run_without_submitting=True,name="refinemask")
#def _getTR(file):
#import nibabel as nb
#motr=nb.load(file).header.get_zooms()[3]
#return motr
def _pick_csf(files):
return files[0]
def _pick_gm(files):
return files[1]
def _pick_wm(files):
return files[-1]
workflow.connect([
# extract CBF data and compute cbf
(inputnode, extractcbf, [('bold','in_file')]),
(extractcbf, computecbf, [('out_file','in_cbf'),('out_avg','in_m0file')]),
#(inputnode,computecbf,[('bold_mask','in_mask')]),
(inputnode,refinemaskj,[('t1w_mask','in_t1mask'),('bold_mask','in_boldmask'),
('t1_bold_xform','transforms')]),
(inputnode,computecbf,[('bold_mask','in_mask')]),
(inputnode,scorescrub,[('bold_mask','in_mask')]),
(inputnode,basilcbf,[('bold_mask','mask')]),
# extract probability maps
(inputnode, csf_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, csf_tfm, [(('t1w_tpms', _pick_csf), 'input_image')]),
(inputnode, wm_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, wm_tfm, [(('t1w_tpms', _pick_wm), 'input_image')]),
(inputnode, gm_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, gm_tfm, [(('t1w_tpms', _pick_gm), 'input_image')]),
(computecbf,scorescrub,[('out_cbf','in_file')]),
(gm_tfm,scorescrub,[('output_image','in_greyM')]),
(wm_tfm,scorescrub,[('output_image','in_whiteM')]),
(csf_tfm,scorescrub,[('output_image','in_csf')]),
#(inputnode,scorescrub,[('bold_mask','in_mask')]),
(extractcbf,basilcbf,[('out_file','in_file')]),
(gm_tfm,basilcbf,[('output_image','pvgm')]),
(wm_tfm,basilcbf,[('output_image','pvwm')]),
#(inputnode,basilcbf,[('bold_mask','mask')]),
(extractcbf,basilcbf,[('out_avg','mzero')]),
(basilcbf,outputnode,[('out_cbfb','out_cbfb'),
('out_cbfpv','out_cbfpv')]),
(computecbf,outputnode,[('out_cbf','out_cbf'),
('out_mean','out_mean')]),
(scorescrub,outputnode,[('out_score','out_score'),('out_scoreindex','out_scoreindex'),
('out_avgscore','out_avgscore'),('out_scrub','out_scrub')]),
])
return workflow
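# Hedged usage sketch (not part of the original module): a parent pipeline would
# normally build this workflow and wire its ``inputnode`` to upstream outputs.
# The metadata values and the aslcontext filename below are illustrative
# assumptions, not values taken from any real dataset.
def _example_build_cbf_wf():
    example_metadata = {
        'LabelingType': 'PCASL', 'M0': 1.0, 'RepetitionTime': 4.0,
        'InitialPostLabelDelay': 1.5, 'LabelingDuration': 1.8,
    }
    return init_cbf_compt_wf(mem_gb=0.5, metadata=example_metadata,
                             aslcontext='sub-01_aslcontext.tsv',
                             pcasl=True, omp_nthreads=1)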
def init_cbfqc_compt_wf(mem_gb,bold_file,metadata,omp_nthreads, name='cbfqc_compt_wf'):
workflow = Workflow(name=name)
workflow.__desc__ = """\
The following quality control (QC) measures were estimated: framewise displacement and relative root mean square.
Other QC measures include the Dice and Jaccard indices, cross-correlation and coverage, which estimate the coregistration
quality of ASL and T1w images and the normalization quality of ASL to the template. The quality evaluation index (QEI)
was also computed for CBF [@cbfqc]. The QEI provides an automated, objective quality evaluation of CBF maps and measures
the CBF quality based on structural similarity, spatial variability and the percentage of voxels with negative CBF within grey matter.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['meancbf','avgscore','scrub','basil','pv','bold_mask','t1w_tpms','t1w_mask','t1_bold_xform','bold_mask_std',
'confmat']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['qc_file']),
name='outputnode')
def _pick_csf(files):
return files[0]
def _pick_gm(files):
return files[1]
def _pick_wm(files):
return files[-1]
csf_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='csf_tfm', mem_gb=0.1)
wm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='wm_tfm', mem_gb=0.1)
gm_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='gm_tfm', mem_gb=0.1)
mask_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True),
name='masktonative', mem_gb=0.1)
from templateflow.api import get as get_template
brain_mask = str(get_template(
'MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask'))
from nipype.interfaces.afni import Resample
resample = pe.Node(Resample(in_file=brain_mask,outputtype='NIFTI_GZ'),name='resample', mem_gb=0.1)
#template_tfm = pe.Node(ApplyTransforms(interpolation='NearestNeighbor', float=True,input_image=brain_mask),
#name='template_tfm', mem_gb=0.1)
qccompute=pe.Node(qccbf(in_file=bold_file),name='qccompute',run_without_submitting=True,mem_gb=0.2)
workflow.connect([(inputnode, csf_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, csf_tfm, [(('t1w_tpms', _pick_csf), 'input_image')]),
(inputnode, wm_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, wm_tfm, [(('t1w_tpms', _pick_wm), 'input_image')]),
(inputnode, gm_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms')]),
(inputnode, gm_tfm, [(('t1w_tpms', _pick_gm), 'input_image')]),
(inputnode, mask_tfm, [('bold_mask', 'reference_image'),
('t1_bold_xform', 'transforms'),('t1w_mask', 'input_image')]),
(mask_tfm,qccompute,[('output_image','in_t1mask')]),
(inputnode,qccompute,[('bold_mask','in_boldmask'),
('confmat','in_confmat')]),
(inputnode,qccompute,[(('bold_mask_std',_pick_csf),'in_boldmaskstd')]),
(inputnode,resample,[(('bold_mask_std',_pick_csf),'master')]),
(resample,qccompute,[('out_file','in_templatemask')]),
(gm_tfm,qccompute,[('output_image','in_greyM')]),
(wm_tfm,qccompute,[('output_image','in_whiteM')]),
(csf_tfm,qccompute,[('output_image','in_csf')]),
(inputnode,qccompute,[('scrub','in_scrub'),
('meancbf','in_meancbf'),('avgscore','in_avgscore'),
('basil','in_basil'),('pv','in_pvc')]),
(qccompute,outputnode,[('qc_file','qc_file')]),
])
return workflow
|
py | b4095ae233b41d84220b82bcab244cc06c83582e | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-social-service (1.29.2)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.social import bulk_inc_user_stat_item_1 as bulk_inc_user_stat_item_1_internal
from accelbyte_py_sdk.api.social.models import BulkStatItemInc
from accelbyte_py_sdk.api.social.models import BulkStatItemOperationResult
from accelbyte_py_sdk.api.social.models import ValidationErrorEntity
@click.command()
@click.argument("user_id", type=str)
@click.option("--body", "body", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def bulk_inc_user_stat_item_1(
user_id: str,
body: Optional[str] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(bulk_inc_user_stat_item_1_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
if body is not None:
try:
body_json = json.loads(body)
body = [BulkStatItemInc.create_from_dict(i0) for i0 in body_json]
except ValueError as e:
raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
result, error = bulk_inc_user_stat_item_1_internal(
user_id=user_id,
body=body,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"bulkIncUserStatItem_1 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
bulk_inc_user_stat_item_1.operation_id = "bulkIncUserStatItem_1"
bulk_inc_user_stat_item_1.is_deprecated = False
|
py | b4095b68c9fc1d7f9b7c55d124c95964dbf8b95e | # import modules for handling files
import csv
from pathlib import Path
from sys import argv
# import third-party packages
import numpy as np
import tifffile as tiff
from skimage import img_as_ubyte
from skimage.morphology import binary_opening
#from scipy.ndimage import generate_binary_structure
# import utility functions
from .utility import mask_cell
# %%
def batch_mask(path, pattern='GFP', mask_channel=None,
camera_bits=16, r=10, method='triangle', mask_open=True,
save_values=False, save_summary=False, save_mask=False):
"""
Read all .tif images with a keyword and apply a 3D masking procedure
based on a median-filtered image.
Returns
-------
dict
Key is the image name, value is a flat array of all intensities in the masked image.
Parameters
----------
path: str
A path to folder with images to be processed. Must contain images in TIFF format.
pattern: str, optional
A pattern within filenames to be processed.
mask_channel: str, optional
If specified, the mask is created based on another image.
Both images have to have the same name, except *mask_channel* is substituted for *pattern*.
camera_bits: int, optional
Ignore images with saturated pixels, based on the camera digitizer bit-depth.
r: int, optional
Radius for the median filtering function.
method: str, optional
Which thresholding method to use. See .utility.treshold().
mask_open: bool, optional
If True, perform a binary opening of the mask with the default selem (3D cross).
save_values: bool, optional
If True, write one .txt file per image with all pixel values.
save_summary: bool, optional
If True, write one .csv file per image with summary statistics (mean, median, sd).
save_mask: bool, optional
If True, save masks as 8-bit .tif files.
"""
# path handling through Pathlib: make output folder within current path
path_in = Path(path)
# initialise a dictionary to store results
pixels = {}
# output: prepare folder to keep masks
if save_mask:
path_out = path_in.joinpath('masks') # prepare output path
path_out.mkdir(parents=True, exist_ok=True)
# actual function: loop over each file with pattern, mask and convert to array
for i in sorted(path_in.glob('*' + pattern + '*')):
im = tiff.imread(str(i))
# filter out saturated images
if 2 ** camera_bits - 1 in im:
continue
# generate and apply mask
if mask_channel:
im_alt = tiff.imread(str(i).replace(pattern, mask_channel))
im_mask = mask_cell(im_alt, radius=r, method=method)
if mask_open:
im_mask = binary_opening(im_mask)
im_values = im[im_mask] # mask and select values
else:
im_mask = mask_cell(im, radius=r, method=method)
if mask_open:
im_mask = binary_opening(im_mask)
im_values = im[im_mask] # mask and select values
# add dictionary entry with name (no extension) and pixel values
pixels[i.name.replace('.tif', '')] = im_values
# output: save masks in a subfolder
if save_mask:
# substitute channel and / or annotate mask in filename
if mask_channel:
mask_out = path_out.joinpath(i.name.replace(
pattern, mask_channel).replace('.tif', '_mask.tif'))
else:
mask_out = path_out.joinpath(
i.name.replace('.tif', '_mask.tif'))
tiff.imsave(mask_out, img_as_ubyte(im_mask))
# very useful for assessing the algorithm but ultimately waste of space
tiff.imsave(path_out.joinpath(i.name.replace('.tif', '_masked.tif')),
im * im_mask)
# output: save each dictionary entry as separate file in a subfolder
if save_values:
path_out = path_in.joinpath('masked_arrays') # prepare output path
f = '%i' # not quite necessary but the default 18-digit precision means relatively huge files
path_out.mkdir(parents=True, exist_ok=True)
# save array
for key, value in pixels.items():
np.savetxt(str(path_out.joinpath(key))+'.txt', value, fmt=f)
# output: save a csv file with mean intensity for each cell
if save_summary:
path_out = path_in.joinpath("summary.csv")
with path_out.open('w', newline='') as f: # initialize a csv file for writing
# initialize csv writer and write headers
writer = csv.writer(f, dialect='excel')
writer.writerow(['cell', 'mean', 'median', 'sd'])
for key, value in pixels.items():
writer.writerow([key, round(np.mean(value), 3),
np.median(value), round(np.std(value), 3)])
# output: return dictionary of masked pixels
    return pixels
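# Hedged usage sketch (not part of the original script): the call below assumes a
# hypothetical folder 'experiment_01' containing GFP images with matching mCherry
# images used for masking; only the keyword arguments mirror the real signature.
def _example_batch_mask():
    return batch_mask('experiment_01', pattern='GFP', mask_channel='mCherry',
                      r=10, method='triangle', save_mask=True, save_summary=True)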
# get the path from command line and run counting function
if __name__ == "__main__": # only executed if ran as script
path = argv[1]
batch_mask(path, save_summary=True)
|
py | b4095cd54a6f4ac6d0aaddbb90b4aeb0f6e29de0 | #!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Check BOTMETA file."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
import re
import sys
import yaml
from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
from voluptuous import Required, Schema, Invalid
from voluptuous.humanize import humanize_error
IGNORE_NO_MAINTAINERS = [
'plugins/cache/memcached.py',
'plugins/cache/redis.py',
'plugins/callback/cgroup_memory_recap.py',
'plugins/callback/context_demo.py',
'plugins/callback/counter_enabled.py',
'plugins/callback/hipchat.py',
'plugins/callback/jabber.py',
'plugins/callback/log_plays.py',
'plugins/callback/logdna.py',
'plugins/callback/logentries.py',
'plugins/callback/null.py',
'plugins/callback/selective.py',
'plugins/callback/slack.py',
'plugins/callback/splunk.py',
'plugins/callback/yaml.py',
'plugins/inventory/nmap.py',
'plugins/inventory/virtualbox.py',
'plugins/connection/chroot.py',
'plugins/connection/iocage.py',
'plugins/connection/lxc.py',
'plugins/lookup/cartesian.py',
'plugins/lookup/chef_databag.py',
'plugins/lookup/consul_kv.py',
'plugins/lookup/credstash.py',
'plugins/lookup/cyberarkpassword.py',
'plugins/lookup/flattened.py',
'plugins/lookup/keyring.py',
'plugins/lookup/lastpass.py',
'plugins/lookup/passwordstore.py',
'plugins/lookup/shelvefile.py',
'plugins/filter/json_query.py',
'plugins/filter/random_mac.py',
]
FILENAME = '.github/BOTMETA.yml'
LIST_ENTRIES = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore'))
AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])')
def read_authors(filename):
data = {}
try:
with open(filename, 'rb') as b_module_data:
M = ast.parse(b_module_data.read())
for child in M.body:
if isinstance(child, ast.Assign):
for t in child.targets:
try:
theid = t.id
except AttributeError:
# skip errors can happen when trying to use the normal code
continue
if theid == 'DOCUMENTATION':
if isinstance(child.value, ast.Dict):
data = ast.literal_eval(child.value)
else:
data = yaml.safe_load(child.value.s)
except Exception as e:
print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e))
return []
author = data.get('author') or []
if isinstance(author, str):
author = [author]
return author
def extract_author_name(author):
m = AUTHOR_REGEX.match(author)
if m:
return m.group(1)
if author == 'Ansible Core Team':
return '$team_ansible_core'
return None
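# Hedged doctest-style illustration (the author strings below are made up):
#
#     extract_author_name('Jane Doe (@janedoe)')    # -> 'janedoe'
#     extract_author_name('Ansible Core Team')      # -> '$team_ansible_core'
#     extract_author_name('No GitHub handle here')  # -> None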
def validate(filename, filedata):
if not filename.startswith('plugins/'):
return
if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')):
return
    # Compile list of all active and inactive maintainers
all_maintainers = filedata['maintainers'] + filedata['ignore']
if not filename.startswith('plugins/filter/'):
maintainers = read_authors(filename)
for maintainer in maintainers:
maintainer = extract_author_name(maintainer)
if maintainer is not None and maintainer not in all_maintainers:
msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % (
maintainer, filename, ', '.join(all_maintainers))
print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg))
should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS
if not all_maintainers and not should_have_no_maintainer:
print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename))
if all_maintainers and should_have_no_maintainer:
print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0])))
def main():
"""Main entry point."""
try:
with open(FILENAME, 'rb') as f:
botmeta = yaml.safe_load(f)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line +
1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
return
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' %
(FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return
# Validate schema
MacroSchema = Schema({
(str): Any(str, None),
}, extra=PREVENT_EXTRA)
FilesSchema = Schema({
(str): {
('supershipit'): str,
('support'): Any('community'),
('maintainers'): str,
('labels'): str,
('keywords'): str,
('notify'): str,
('ignore'): str,
},
}, extra=PREVENT_EXTRA)
schema = Schema({
('notifications'): bool,
('automerge'): bool,
('macros'): MacroSchema,
('files'): FilesSchema,
}, extra=PREVENT_EXTRA)
try:
schema(botmeta)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line/column numbers
print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error)))
return
# Preprocess (substitute macros, convert to lists)
macros = botmeta.get('macros') or {}
macro_re = re.compile(r'\$([a-zA-Z_]+)')
def convert_macros(text, macros):
def f(m):
macro = m.group(1)
replacement = (macros[macro] or '')
if macro == 'team_ansible_core':
return '$team_ansible_core %s' % replacement
return replacement
return macro_re.sub(f, text)
files = {}
try:
for file, filedata in (botmeta.get('files') or {}).items():
file = convert_macros(file, macros)
filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items())
files[file] = filedata
for k, v in filedata.items():
if k in LIST_ENTRIES:
filedata[k] = v.split()
except KeyError as e:
print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro %s' % e))
return
# Scan all files
unmatched = set(files)
for dirs in ('plugins', 'tests', 'changelogs'):
for dirpath, dirnames, filenames in os.walk(dirs):
for file in sorted(filenames):
if file.endswith('.pyc'):
continue
filename = os.path.join(dirpath, file)
if os.path.islink(filename):
continue
if os.path.isfile(filename):
matching_files = []
for file, filedata in files.items():
if filename.startswith(file):
matching_files.append((file, filedata))
if file in unmatched:
unmatched.remove(file)
if not matching_files:
print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename))
matching_files.sort(key=lambda kv: kv[0])
filedata = dict()
for k in LIST_ENTRIES:
filedata[k] = []
for dummy, data in matching_files:
for k, v in data.items():
if k in LIST_ENTRIES:
v = filedata[k] + v
filedata[k] = v
validate(filename, filedata)
for file in unmatched:
print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Entry %s was not used' % file))
if __name__ == '__main__':
main()
|
py | b4095e6760ec09ad8883c207b685d1d03a54ef1f | '''Containers for generic tensors mimicking corresponding Parameter containers
see torch.nn.ParameterDict and torch.nn.ParameterList for reference.
'''
import torch
import operator
from collections import OrderedDict
from torch._six import container_abcs
from torch.nn import Module
class BufferList(Module):
r"""Holds buffers in a list.
:class:`~torch.nn.BufferList` can be indexed like a regular Python
list, but buffers it contains are properly registered, and will be
visible by all :class:`~torch.nn.Module` methods.
Arguments:
        tensors (iterable, optional): an iterable of :class:`~torch.Tensor` to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
                self.buffers = BufferList([
                    torch.randn(10, 10) for i in range(10)
                ])
def forward(self, x):
# BufferList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.buffers):
x = self.buffers[i // 2].mm(x) + p.mm(x)
return x
"""
def __init__(self, tensors=None):
super().__init__()
if tensors is not None:
self += tensors
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
def __getitem__(self, idx):
if isinstance(idx, slice):
return type(self)(list(self._buffers.values())[idx])
else:
idx = self._get_abs_string_index(idx)
return self._buffers[str(idx)]
def __setitem__(self, idx, tensor):
idx = self._get_abs_string_index(idx)
return self.register_buffer(str(idx), tensor)
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
def __iadd__(self, tensors):
return self.extend(tensors)
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def append(self, tensor):
"""Appends a given tensor at the end of the list.
Arguments:
tensor (torch.Tensor): buffer to append
"""
self.register_buffer(str(len(self)), tensor)
return self
def extend(self, tensors):
"""Appends tensors from a Python iterable to the end of the list.
Arguments:
tensors (iterable): iterable of buffers to append
"""
if not isinstance(tensors, container_abcs.Iterable):
raise TypeError("BufferList.extend should be called with an "
"iterable, but got " + type(tensors).__name__)
offset = len(self)
for i, tensor in enumerate(tensors):
self.register_buffer(str(offset + i), tensor)
return self
def extra_repr(self):
child_lines = []
for k, t in self._buffers.items():
size_str = 'x'.join(str(size) for size in t.size())
device_str = '' if not t.is_cuda else ' (GPU {})'.format(t.get_device())
parastr = '{} of size {}{}'.format(
torch.typename(t), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('BufferList should not be called.')
class BufferDict(Module):
r"""Holds buffers in a dictionary.
BufferDict can be indexed like a regular Python dictionary, but buffers it
contains are properly registered, and will be visible by all Module methods.
:class:`~torch.nn.BufferDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.BufferDict.update`, the order of the merged ``OrderedDict``
or another :class:`~torch.nn.BufferDict` (the argument to
:meth:`~torch.nn.BufferDict.update`).
Note that :meth:`~torch.nn.BufferDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
buffers (iterable, optional): a mapping (dictionary) of
(string : :class:`~torch.Tensor`) or an iterable of key-value pairs
of type (string, :class:`~torch.Tensor`)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.buffers = nn.BufferDict({
'left': torch.randn(5, 10),
'right': torch.randn(5, 10)
})
def forward(self, x, choice):
x = self.buffers[choice].mm(x)
return x
"""
def __init__(self, tensors=None):
super().__init__()
if tensors is not None:
self.update(tensors)
def __getitem__(self, key):
return self._buffers[key]
def __setitem__(self, key, parameter):
self.register_buffer(key, parameter)
def __delitem__(self, key):
del self._buffers[key]
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.keys())
def __contains__(self, key):
return key in self._buffers
def clear(self):
"""Remove all items from the BufferDict.
"""
self._buffers.clear()
def pop(self, key):
r"""Remove key from the BufferDict and return its parameter.
Arguments:
key (string): key to pop from the BufferDict
"""
v = self[key]
del self[key]
return v
def keys(self):
r"""Return an iterable of the BufferDict keys.
"""
return self._buffers.keys()
def items(self):
r"""Return an iterable of the BufferDict key/value pairs.
"""
return self._buffers.items()
def values(self):
r"""Return an iterable of the BufferDict values.
"""
return self._buffers.values()
def update(self, tensors):
r"""Update the :class:`~torch.nn.BufferDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`buffers` is an ``OrderedDict``, a :class:`~torch.nn.BufferDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Arguments:
            tensors (iterable): a mapping (dictionary) from string to
:class:`~torch.Tensor`, or an iterable of
key-value pairs of type (string, :class:`~torch.Tensor`)
"""
if not isinstance(tensors, container_abcs.Iterable):
raise TypeError("BufferDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(tensors).__name__)
if isinstance(tensors, container_abcs.Mapping):
if isinstance(tensors, (OrderedDict, BufferDict)):
for key, tensor in tensors.items():
self[key] = tensor
else:
for key, tensor in sorted(tensors.items()):
self[key] = tensor
else:
for j, t in enumerate(tensors):
if not isinstance(t, container_abcs.Iterable):
                    raise TypeError("BufferDict update sequence element "
                                    "#" + str(j) + " should be Iterable; is " +
                                    type(t).__name__)
if not len(t) == 2:
raise ValueError("BufferDict update sequence element "
"#" + str(j) + " has length " + str(len(t)) +
"; 2 is required")
self[t[0]] = t[1]
def extra_repr(self):
child_lines = []
for k, t in self._buffers.items():
size_str = 'x'.join(str(size) for size in t.size())
device_str = '' if not t.is_cuda else ' (GPU {})'.format(t.get_device())
parastr = '{} of size {}{}'.format(
torch.typename(t), size_str, device_str)
child_lines.append(' (' + k + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('BufferDict should not be called.')
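# Hedged illustration (not part of the upstream containers): buffers registered
# through BufferDict live on the module, so they move with .to()/.cuda() and show
# up in state_dict(); the key names and tensor shapes below are arbitrary.
def _example_buffer_dict():
    bd = BufferDict({'left': torch.randn(2, 3), 'right': torch.randn(2, 3)})
    return sorted(bd.keys()), 'left' in bd.state_dict()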
|
py | b4095f149e2b16c5f4cce07c7819ff7228eb5d98 | from django.urls import path
from modelo import views as v
app_name = 'modelo'
urlpatterns = [
path('', v.index, name='modelo_index'),
] |
py | b4095f39efe6913f36f3cca1a9a9944a1dc9513c | # Generated by Django 3.1.4 on 2020-12-22 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0013_auto_20201221_1707'),
]
operations = [
migrations.AddField(
model_name='test_data',
name='avg1',
field=models.CharField(default='n/a', max_length=3),
),
migrations.AddField(
model_name='test_data',
name='avg2',
field=models.CharField(default='n/a', max_length=3),
),
migrations.AddField(
model_name='test_data',
name='avg3',
field=models.CharField(default='n/a', max_length=3),
),
migrations.AddField(
model_name='test_data',
name='avg4',
field=models.CharField(default='n/a', max_length=3),
),
migrations.AlterField(
model_name='question_attempt',
name='result',
field=models.CharField(default='O', max_length=1),
),
migrations.AlterField(
model_name='test_data',
name='score',
field=models.CharField(default='n/a', max_length=3),
),
]
|
py | b4096049ad8af715dbd1b7b57014fcc45d11b586 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
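# Hedged illustration (the values are assumptions): read_bitcoin_config expects a
# plain key=value bitcoin.conf such as
#
#     rpcuser=alice
#     rpcpassword=secret
#     testnet=1
#
# FakeSecHead prepends a dummy [all] section header so SafeConfigParser will parse it.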
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 54019 if testnet else 53019
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
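# Hedged worked example (amounts are made up): with needed = Decimal("1.5") and two
# unspent inputs of 1.0 BTC each, select_coins greedily accumulates inputs until
# `have >= needed`, returning both outputs and a change of Decimal("0.5").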
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        # `fee` is not defined in this scope; the fee implied by the transaction is total_in - total_out
        if kb > 1 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
                      help="address to send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
py | b409610696cc0bc02156645fcf6dfff919728c96 | from queryset_utils import annotate_mock_class, make_mock_list_from_args, get_keys_from_dict, make_mock_in_bulk_dict, make_mock_aggregate_dict, annotate_return_value
from unittest.mock import MagicMock
import unittest
class TestAnnotateMockClass(unittest.TestCase):
def test_annotate_mock_class(self):
mock_model = MagicMock()
kwargs = {'test':'test'}
mock_class = annotate_mock_class(kwargs, mock_model)
self.assertTrue(hasattr(mock_class, 'test'))
self.assertEqual(mock_class.test, 'test')
class TestMakeMockListFromArgs(unittest.TestCase):
def test_make_mock_list_from_args(self):
args = ['test', 'test2']
mock_values_list = make_mock_list_from_args(args)
self.assertEqual(mock_values_list, [1, 1])
def test_make_mock_list_from_args_empty_args(self):
args = []
mock_values_list = make_mock_list_from_args(args)
self.assertEqual(mock_values_list, [1])
class TestGetKeysFromDict(unittest.TestCase):
def test_get_keys_from_dict(self):
test_dict = {'key1': 1, 'key2': 2}
keys_list = get_keys_from_dict(test_dict)
self.assertEqual(keys_list, ['key1', 'key2'])
class TestMakeMockInBulkDict(unittest.TestCase):
def test_make_mock_in_bulk_dict(self):
args = ['test']
mock_in_bulk_dict = make_mock_in_bulk_dict(args)
self.assertEqual(mock_in_bulk_dict, {'test': ' '})
def test_make_mock_in_bulk_dict_empty_args(self):
args = []
mock_in_bulk_dict = make_mock_in_bulk_dict(args)
self.assertEqual(mock_in_bulk_dict, {'1': ' '})
class TestMakeMockAnnotateDict(unittest.TestCase):
def test_make_mock_aggregate_dict(self):
kwargs = {'test': 'test'}
mock_aggregate_dict = make_mock_aggregate_dict(kwargs)
self.assertEqual(mock_aggregate_dict, {'test': 'test'} )
class TestAnnotateReturnValue(unittest.TestCase):
def test_annotate_return_value(self):
kwargs = {'test':'test'}
return_value = {'original_key':'original_value'}
new_return_value = annotate_return_value(kwargs, return_value)
self.assertEqual(new_return_value, {'original_key': ' ', 'test': ' '})
|
py | b40961284c687cb55c744163feffdae7c8dfed9a | import os
import pytest
from aioscrapy.cache import FileCache, MemoryCache
def test_file_cache(tmpdir: str):
key = 'key'
fake_key = 'fake_key'
value = [1, 2, 3]
cache = FileCache(tmpdir)
cache.set(key, value)
assert cache.get(key) == value
with pytest.raises(LookupError):
cache.get(fake_key)
def test_file_cache_wrong_dir(tmpdir: str):
key = 'key'
value = [1, 2, 3]
os.chmod(tmpdir, 0o400)
cache = FileCache(tmpdir)
with pytest.raises(OSError):
cache.set(key, value)
def test_memory_cache():
key = 'key'
fake_key = 'fake_key'
value = [1, 2, 3]
cache = MemoryCache()
cache.set(key, value)
assert cache.get(key) == value
with pytest.raises(LookupError):
cache.get(fake_key)
|
py | b4096157a9ba93c4e8dfa227fdb41caad2ccfb76 | import os
import random
import collections
from PIL import Image, ImageChops, ImageDraw, ImageSequence, ImagePalette
import pathlib
import glob
import hashlib
from pandas import DataFrame
import copy
import cProfile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import numpy
import math
import calendar
import time
DIR=str(pathlib.Path(__file__).parent.absolute())
DIR_OUTPUT = os.path.join(DIR,"output")
DIR_INPUT = os.path.join(DIR,"input")
ROTATIONS = False
def cellsDirs(pos, size):
"""
    Returns the valid neighbour directions for a position within a 2D grid of the given size.
"""
x, y = pos
width, height = size
dirs = []
if x > 0: dirs.append([-1,0])
if x < width-1: dirs.append([1,0])
if y > 0: dirs.append([0,-1])
if y < height-1: dirs.append([0,1])
return dirs
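# Hedged doctest-style illustration: a corner cell has only two valid neighbours,
# an interior cell has all four.
#
#     cellsDirs((0, 0), (3, 3))  # -> [[1, 0], [0, 1]]
#     cellsDirs((1, 1), (3, 3))  # -> [[-1, 0], [1, 0], [0, -1], [0, 1]]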
def cellsDirsNoPos():
"""
Returns all possible directions.
"""
dirs = []
dirs.append([-1,0])
dirs.append([1,0])
dirs.append([0,-1])
dirs.append([0,1])
return dirs
def initWorkspace():
files = glob.glob(DIR_OUTPUT+"/*")
for f in files:
os.remove(f)
def imgHash(img:Image):
return hashlib.md5(img.tobytes()).hexdigest()
def crop(img, x, y, w, h):
box = (x, y, x+w, y+h)
area = img.crop(box)
return area
def get_concat_h(im1, im2):
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def get_concat_v(im1, im2):
dst = Image.new('RGB', (im1.width, im1.height + im2.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (0, im1.height))
return dst
def smart_get_concat(im1,im2,dir):
if dir[0]!=0:
return get_concat_h(im1,im2)
else:
return get_concat_v(im1,im2)
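# Hedged illustration (image sizes are arbitrary): a horizontal direction such as
# [1, 0] concatenates the tiles side by side, a vertical one such as [0, 1] stacks them.
#
#     a = Image.new('RGB', (4, 4)); b = Image.new('RGB', (4, 4))
#     smart_get_concat(a, b, [1, 0]).size  # -> (8, 4)
#     smart_get_concat(a, b, [0, 1]).size  # -> (4, 8)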
def createPatternsFromImages(img1:Image, img2:Image, dir:list):
"""
Returns all patterns created from concatenation of two images with given direction.
"""
patterns = dict()
if dir[0]!=0:
concatenated = get_concat_h(img1,img2)
for x in range(img1.width):
pattern = crop(concatenated,x,0,img1.width,img1.height)
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
if ROTATIONS:
for i in range(1,4):
pattern = pattern.rotate(90)
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
elif dir[1]!=0:
concatenated = get_concat_v(img1,img2)
for y in range(img1.height):
pattern = crop(concatenated,0,y,img1.width,img1.height)
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
if ROTATIONS:
for i in range(1,4):
pattern = pattern.rotate(90)
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
return patterns
def createPatternsFromImage(imgsrc:Image, N:int):
"""
    Creates NxN patterns from the given image. Returns a dict (k, v) where k = hash of the pattern image, v = the pattern image.
"""
patterns = dict()
imgwrap = crop(imgsrc,0,0,imgsrc.width,imgsrc.height)
img = Image.new('RGB', (imgsrc.width+N, imgsrc.height+N))
img.paste(imgsrc,(0,0))
img.paste(imgwrap,(0,imgsrc.height))
img.paste(imgwrap,(imgsrc.width,0))
img.paste(imgwrap,(imgsrc.width,imgsrc.height))
for x in range(img.size[0]-N):
for y in range(img.size[1]-N):
pattern = crop(img, x, y,N,N)
key = f"pat_{x}_{y}_r{0}"
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
if ROTATIONS:
for i in range(1,4):
pattern = pattern.rotate(90)
key = f"pat_{x}_{y}_r{i}"
key = imgHash(pattern)
patterns.setdefault(key,0)
patterns[key] = pattern.copy()
return patterns
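# Hedged sketch (the demo image is an assumption): patterns are keyed by an MD5 of
# their pixels, so a uniform image collapses to a single unique NxN pattern.
def _example_patterns():
    demo = Image.new('RGB', (4, 4), color=(255, 0, 0))
    return createPatternsFromImage(demo, 2)  # one entry for a solid-colour image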
class WFC2D:
def __init__(self, inputimg:Image, N:int, cellCount:int):
self.inputimg = inputimg
self.N = N
self.cellCount = cellCount
self.__initPatterns(show=True)
self.__initConstraints(show=False)
self.__initCells()
self.animation_frames_plt = list()
self.animation_frames_gif = list()
def __initPatterns(self,show=True):
self.patterns = createPatternsFromImage(self.inputimg, self.N)
print(f"Finished initing patterns. Total number of patterns: {len(self.patterns)}")
#Makes simple patterns plot.
if not show:
return
s = math.sqrt(len(self.patterns))+1
fig = plt.figure(figsize=(self.N*4,self.N*4))
for i,pattern in enumerate(self.patterns.values(),1):
fig.add_subplot(s,s,i)
plt.axis('off')
plt.imshow(pattern)
fig.canvas.set_window_title('Patterns')
plt.show()
plt.close()
def __initCells(self):
"""
Makes every cell a superposition of all possible states.
"""
self.cells = list()
for i in range(self.cellCount):
cellrow = list()
for j in range(self.cellCount):
cell = list()
for key in self.patterns.keys():
cell.append(key)
cellrow.append(cell)
self.cells.append(cellrow)
print(f"Finished initing cells.")
def __observe(self):
"""
Selects cell with minimal entropy and chooses a state randomly.
"""
min = len(self.patterns)+1
minidx = -1
minidy = -1
for idrow,i in enumerate(self.cells):
for id, j in enumerate(self.cells[idrow]):
if len(j)>1:
val = len(j)
if(val<min):
minidx = idrow
minidy = id
min = val
#Random one of possible choices at cell with minimal entropy
#Possible change: random distribution using number of patterns that appeared in input
if minidx == -1:
return False
self.cells[minidx][minidy] = [self.cells[minidx][minidy][random.randint(0,len(self.cells[minidx][minidy])-1)]]
return [minidx, minidy]
def imageFromCells(self):
"""
Creates image from cells. Each cell has a list of possible patterns, if it's length is 1, then it means it is collapsed and we can
map it to our patterns to create one unique image.
"""
outputImage = Image.new('RGB',(self.N*self.cellCount,self.N*self.cellCount),color=(128,128,128))
for idrow,i in enumerate(self.cells):
for id, j in enumerate(self.cells[idrow]):
if len(j):
pattern = self.patterns[j[0]]
outputImage.paste(pattern, (idrow*self.N, id*self.N))
return outputImage
def __initConstraints(self,show=True):
self.constraints = list()
for keyi,itemi in self.patterns.items():
for keyj,itemj in self.patterns.items():
for dir in cellsDirsNoPos():
foundpatterns = createPatternsFromImages(itemi, itemj, dir)
if set(foundpatterns)<=set(self.patterns.keys()):
self.constraints.append([keyi,keyj, dir])
print(f"Finished calculating constraints. Total number of constraints: {len(self.constraints)}")
#Makes a simple constraints plots.
if not show:
return
fig = plt.figure(figsize=(self.N*4,self.N*4))
s = math.sqrt(len(self.constraints))+1
for i,c in enumerate(self.constraints,1):
fig.add_subplot(s,s,i)
plt.axis('off')
im = smart_get_concat(self.patterns[c[0]],self.patterns[c[1]],c[2])
plt.imshow(im)
fig.canvas.set_window_title('Constraints')
plt.show()
plt.close()
def __stackpropagate(self, pos:list):
"""Propagates constraint information to all neighbours. Repeat until no changes"""
stack = [pos]
while len(stack)>0:
current_pos=stack.pop()
for dir in cellsDirs(current_pos,[self.cellCount,self.cellCount]):
next_pos_x = (current_pos[0]+dir[0])
next_pos_y = (current_pos[1]+dir[1])
for tile in set(self.cells[next_pos_x][next_pos_y]):
#Check if any combinations match with constraints for a given tile
possible_tile = any([cur_tile,tile,dir] in self.constraints for cur_tile in self.cells[current_pos[0]][current_pos[1]])
#If not, this tile is invalid, remove it and propagate information to the neighbours
if not possible_tile:
self.cells[next_pos_x][next_pos_y].remove(tile)
if [next_pos_x, next_pos_y] not in stack:
stack.append([next_pos_x,next_pos_y])
def __hasError(self):
for idrow, i in enumerate(self.cells):
for id, j in enumerate(self.cells[idrow]):
if not j:
return True
return False
def generate(self):
fig = plt.figure()
fig.canvas.set_window_title("Output")
try:
k=0
while True:
im = self.imageFromCells()
#Copy current frame into plt and gif list
#matplotlib forces you to specify ffmpeg libs
#PILLOW can create gifs automatically, so we use it
#to make things easier.
self.animation_frames_plt.append([plt.imshow(im,animated=True)])
self.animation_frames_gif.append(im.convert('P',palette=Image.ADAPTIVE))
k=k+1
cells_copy = copy.deepcopy(self.cells)
pos=self.__observe()
if pos==False:
break
self.__stackpropagate(pos)
if k>self.cellCount*self.cellCount*4:
print("Possible error: deadlock. Restart program or change input.")
self.__reset()
return
if self.__hasError():
self.cells = copy.deepcopy(cells_copy)
continue
except:
print("Found exception: \n")
print(DataFrame(self.cells))
self.imageFromCells().save(os.path.join(DIR_OUTPUT,f"EXCEPTION.png"))
raise
ani = animation.ArtistAnimation(fig,self.animation_frames_plt,interval=50,repeat=False)
self.animation_frames_gif[0].save(os.path.join(DIR_OUTPUT,"out.gif"),format='GIF',save_all=True,append_images=self.animation_frames_gif[1:],duration=20,loop=0)
plt.show()
def __reset(self):
        print("Resetting data...")
self.__initCells()
self.animation_frames_plt=list()
self.animation_frames_gif=list()
if __name__ == "__main__":
initWorkspace()
print("Input filename (only .png images files from input folder):")
fname = input()
print("N parameter for NxN patterns:")
n = int(input())
print("Number of cells (with NxN size) that will make CxC image:")
c = int(input())
wfc = WFC2D(Image.open(os.path.join(DIR_INPUT,fname)), n, c)
wfc.generate()
|
py | b40961bdca506fdaf4b9e5ca692545fada6baf19 | #!/usr/bin/env python
# coding: utf-8
# # workingpapers markdown generator for academicpages
#
# Takes a set of bibtex of workingpapers and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)).
#
# The core python code is also in `pubsFromBibs.py`.
# Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:
# * bib file names
# * specific venue keys based on your bib file preferences
# * any specific pre-text for specific files
# * Collection Name (future feature)
#
# TODO: Make this work with other databases of citations,
# TODO: Merge this with the existing TSV parsing solution
from pybtex.database.input import bibtex
import pybtex.database.input.bibtex
from time import strptime
import string
import html
import os
import re
#todo: incorporate different collection types rather than a catch all workingpapers, requires other changes to template
publist = {
"proceeding": {
"file" : "proceedings.bib",
"venuekey": "booktitle",
"venue-pretext": "In the proceedings of ",
"collection" : {"name":"workingpapers",
"permalink":"/publication/"}
},
"journal":{
"file": "pubs.bib",
"venuekey" : "journal",
"venue-pretext" : "",
"collection" : {"name":"workingpapers",
"permalink":"/publication/"}
}
}
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
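# Hedged doctest-style illustration of the escape table above:
#
#     html_escape('Smith & Jones "2020"')  # -> 'Smith &amp; Jones &quot;2020&quot;'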
for pubsource in publist:
parser = bibtex.Parser()
bibdata = parser.parse_file(publist[pubsource]["file"])
#loop through the individual references in a given bibtex file
for bib_id in bibdata.entries:
#reset default date
pub_year = "1900"
pub_month = "01"
pub_day = "01"
b = bibdata.entries[bib_id].fields
try:
pub_year = f'{b["year"]}'
#todo: this hack for month and day needs some cleanup
if "month" in b.keys():
if(len(b["month"])<3):
pub_month = "0"+b["month"]
pub_month = pub_month[-2:]
elif(b["month"] not in range(12)):
tmnth = strptime(b["month"][:3],'%b').tm_mon
pub_month = "{:02d}".format(tmnth)
else:
pub_month = str(b["month"])
if "day" in b.keys():
pub_day = str(b["day"])
pub_date = pub_year+"-"+pub_month+"-"+pub_day
#strip out {} as needed (some bibtex entries that maintain formatting)
clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-")
url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title)
url_slug = url_slug.replace("--","-")
md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-")
html_filename = (str(pub_date) + "-" + url_slug).replace("--","-")
#Build Citation from text
citation = ""
#citation authors - todo - add highlighting for primary author?
for author in bibdata.entries[bib_id].persons["author"]:
citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", "
#citation title
citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\""
#add venue logic depending on citation type
venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","")
citation = citation + " " + html_escape(venue)
citation = citation + ", " + pub_year + "."
## YAML variables
md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n'
md += """collection: """ + publist[pubsource]["collection"]["name"]
md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename
note = False
if "note" in b.keys():
if len(str(b["note"])) > 5:
md += "\nexcerpt: '" + html_escape(b["note"]) + "'"
note = True
md += "\ndate: " + str(pub_date)
md += "\nvenue: '" + html_escape(venue) + "'"
url = False
if "url" in b.keys():
if len(str(b["url"])) > 5:
md += "\npaperurl: '" + b["url"] + "'"
url = True
md += "\ncitation: '" + html_escape(citation) + "'"
md += "\n---"
## Markdown description for individual page
if note:
md += "\n" + html_escape(b["note"]) + "\n"
if url:
md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n"
else:
md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation"
md_filename = os.path.basename(md_filename)
with open("../_workingpapers/" + md_filename, 'w') as f:
f.write(md)
            print(f'SUCCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"")
# field may not exist for a reference
except KeyError as e:
print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"")
continue
|
py | b40962e030d0ccb03007f07b7dba5fa0d9088250 | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from ...config import custom_scalars, datetime
from gql_client.runtime.variables import encode_variables
from gql import gql, Client
from gql.transport.exceptions import TransportQueryError
from functools import partial
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin, config
from ..input.import_flow_draft_input import ImportFlowDraftInput
# fmt: off
QUERY: List[str] = ["""
mutation ImportFlowDraftMutation($input: ImportFlowDraftInput!) {
importFlowDraft(input: $input) {
id
name
}
}
"""
]
class ImportFlowDraftMutation:
@dataclass(frozen=True)
class ImportFlowDraftMutationData(DataClassJsonMixin):
@dataclass(frozen=True)
class FlowDraft(DataClassJsonMixin):
id: str
name: str
importFlowDraft: FlowDraft
# fmt: off
@classmethod
def execute(cls, client: Client, input: ImportFlowDraftInput) -> ImportFlowDraftMutationData.FlowDraft:
variables: Dict[str, Any] = {"input": input}
new_variables = encode_variables(variables, custom_scalars)
response_text = client.execute(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.ImportFlowDraftMutationData.from_dict(response_text)
return res.importFlowDraft
# fmt: off
@classmethod
async def execute_async(cls, client: Client, input: ImportFlowDraftInput) -> ImportFlowDraftMutationData.FlowDraft:
variables: Dict[str, Any] = {"input": input}
new_variables = encode_variables(variables, custom_scalars)
response_text = await client.execute_async(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.ImportFlowDraftMutationData.from_dict(response_text)
return res.importFlowDraft
|
py | b409634b7fa1f16f2103846f5eeeb34fda81fe68 |
def merge_flat_list_label_ov(lista):
    """Return a flat list containing only the `overall` attribute of each element."""
    ov_only = [r.overall for r in lista]
    return ov_only
|
py | b40965fad31cbebb00f7ff0b1c114778d908da54 | # Generated by Django 2.1.5 on 2019-01-19 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sunless_web', '0063_auto_20190113_2104'),
]
operations = [
migrations.DeleteModel(
name='AreaEntity',
),
migrations.DeleteModel(
name='OtherEntity',
),
migrations.AddField(
model_name='patch',
name='patch_type',
field=models.CharField(choices=[('korean', '한국 번역만'), ('full', '일본 번역, 기계 번역 포함')], default='full',
max_length=20, verbose_name='패치 종류'),
),
migrations.AlterField(
model_name='entry',
name='created_at',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='생성일'),
),
]
|
py | b4096648857dd807d12ad0717408c46808faf8cb | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import functools
import inspect
import logging
import unittest
import unittest.mock as _mock
import warnings
from typing import Awaitable, Callable, TypeVar, cast
# pyre-ignore
from unittest.case import _Outcome
_RT = TypeVar("_RT") # Return Generic
FuncType = Callable[..., Awaitable[_RT]]
_F = TypeVar("_F", bound=FuncType) # Function Generic
def _tasks_warning(task_set):
if task_set:
warnings.warn(
"There are tasks already on the event loop before running "
f"the testmethod: {task_set}",
stacklevel=0,
)
warnings.warn(
"This may mean that something is creating tasks at import time",
stacklevel=0,
)
def awaitable(func: _F) -> _F:
"""
    Whatever we are decorating, make it awaitable.
    This is not pretty, but useful when we don't know what
    we are accepting, like for unittest methods.
"""
@functools.wraps(func)
async def new_func(*args, **kws):
result = func(*args, **kws)
if inspect.isawaitable(result):
return await result
return result
return cast(_F, new_func)
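# Hedged illustration (the callables below are assumptions, not part of the API):
# `awaitable` lets the harness await sync and async callables uniformly.
async def _example_awaitable_usage():
    plain = await awaitable(lambda: 42)()  # sync callable -> 42
    async def _coro():
        return 'done'
    wrapped = await awaitable(_coro)()     # async callable -> 'done'
    return plain, wrapped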
class TestCase(unittest.TestCase):
def __init__(self, methodName="runTest", loop=None):
self.loop = loop or asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
super().__init__(methodName)
async def run_async(self, testMethod, outcome, expecting_failure):
with outcome.testPartExecutor(self):
await awaitable(self.setUp)()
if outcome.success:
outcome.expecting_failure = expecting_failure
with outcome.testPartExecutor(self, isTest=True):
await awaitable(testMethod)()
outcome.expecting_failure = False
with outcome.testPartExecutor(self):
await awaitable(self.tearDown)()
await self.doCleanups()
async def doCleanups(self):
outcome = self._outcome or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
with outcome.testPartExecutor(self):
await awaitable(function)(*args, **kwargs)
async def debug_async(self, testMethod):
await awaitable(self.setUp)()
await awaitable(testMethod)()
await awaitable(self.tearDown)()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
await awaitable(function)(*args, **kwargs)
@_mock.patch("asyncio.base_events.logger")
@_mock.patch("asyncio.coroutines.logger")
def asyncio_orchestration_debug(self, testMethod, b_log, c_log):
asyncio.set_event_loop(self.loop)
real_logger = logging.getLogger("asyncio").error
c_log.error.side_effect = b_log.error.side_effect = real_logger
# Don't make testmethods cleanup tasks that existed before them
before_tasks = asyncio.all_tasks(self.loop)
_tasks_warning(before_tasks)
debug_async = self.debug_async(testMethod)
self.loop.run_until_complete(debug_async)
if c_log.error.called or b_log.error.called:
self.fail("asyncio logger.error() called!")
# Sometimes we end up with a reference to our task for debug_async
tasks = {
t
for t in asyncio.all_tasks(self.loop) - before_tasks
if not (t._coro == debug_async and t.done())
}
del before_tasks
self.assertEqual(set(), tasks, "left over asyncio tasks!")
@_mock.patch("asyncio.base_events.logger")
@_mock.patch("asyncio.coroutines.logger")
def asyncio_orchestration_outcome(
self, testMethod, outcome, expecting_failure, b_log, c_log
):
asyncio.set_event_loop(self.loop)
real_logger = logging.getLogger("asyncio").error
c_log.error.side_effect = b_log.error.side_effect = real_logger
# Don't make testmethods cleanup tasks that existed before them
before_tasks = asyncio.all_tasks(self.loop)
_tasks_warning(before_tasks)
run_async = self.run_async(testMethod, outcome, expecting_failure)
ignore_tasks = getattr(
testMethod, "__unittest_asyncio_taskleaks__", False
) or getattr(self, "__unittest_asyncio_taskleaks__", False)
with outcome.testPartExecutor(self):
self.loop.run_until_complete(run_async)
            # Restore expecting_failure so we can test the below
outcome.expecting_failure = expecting_failure
if c_log.error.called or b_log.error.called:
self.fail("asyncio logger.error() called!")
# Sometimes we end up with a reference to our task for run_async
tasks = {
t
for t in asyncio.all_tasks(self.loop) - before_tasks
if not (t._coro == run_async and t.done())
}
del before_tasks
if ignore_tasks and tasks:
warnings.warn(
"There are left over asyncio tasks after running "
f"testmethod: {tasks}",
stacklevel=0,
)
else:
self.assertEqual(set(), tasks, "left over asyncio tasks!")
# pyre-ignore
def run(self, result=None):
"""
This is a complete copy of TestCase.run
But with some asyncio worked into it.
"""
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, "startTestRun", None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if getattr(self.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
):
# If the class or method was skipped.
try:
skip_why = getattr(
self.__class__, "__unittest_skip_why__", ""
) or getattr(testMethod, "__unittest_skip_why__", "")
self._addSkip(result, self, skip_why) # noqa T484
finally:
result.stopTest(self)
return None
expecting_failure_method = getattr(
testMethod, "__unittest_expecting_failure__", False
)
expecting_failure_class = getattr(self, "__unittest_expecting_failure__", False)
expecting_failure = expecting_failure_class or expecting_failure_method
outcome = _Outcome(result)
try:
self._outcome = outcome
self.asyncio_orchestration_outcome(testMethod, outcome, expecting_failure)
for test, reason in outcome.skipped:
self._addSkip(result, test, reason) # noqa T484
self._feedErrorsToResult(result, outcome.errors) # noqa T484
if outcome.success:
if expecting_failure:
if outcome.expectedFailure:
self._addExpectedFailure( # noqa T484
result, outcome.expectedFailure
)
else:
self._addUnexpectedSuccess(result) # noqa T484
else:
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, "stopTestRun", None)
if stopTestRun is not None:
stopTestRun()
# explicitly break reference cycles:
# outcome.errors -> frame -> outcome -> outcome.errors
# outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
outcome.errors.clear()
outcome.expectedFailure = None
# clear the outcome, no more needed
self._outcome = None
# pyre-ignore
def debug(self):
self.asyncio_orchestration_debug(getattr(self, self._testMethodName))
class AsyncMock(_mock.Mock):
"""Mock subclass which can be awaited on. Use this as new_callable
to patch calls on async functions. Can also be used as an async context
manager - returns self.
"""
def __call__(self, *args, **kwargs):
sup = super(AsyncMock, self)
async def coro():
return sup.__call__(*args, **kwargs)
return coro()
def __await__(self):
# Calling await on a Mock/AsyncMock object will result in
# a TypeError. Instead, return the coroutine created above
# to be awaited on
return self().__await__()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
class AsyncContextManagerMock:
"""
    Helper mocking class to handle context manager constructs.
Example of usage:
async def target():
async with aiofiles.open('/tmp/b.txt') as f:
return await f.read()
class TestContextManager(TestCase):
async def test_1(self):
m = AsyncMock()
m.read.return_value = 'fff'
            with unittest.mock.patch(
                'aiofiles.open',
                return_value=AsyncContextManagerMock(return_value=m)
            ):
                r = await target()
                self.assertEqual(r, 'fff')
"""
def __init__(self, *args, **kwargs):
self._mock = AsyncMock(*args, **kwargs)
async def __aenter__(self, *args, **kwargs):
return await self._mock(*args, **kwargs)
async def __aexit__(self, exc_type, exc, tb):
pass
def ignoreTaskLeaks(test_item):
test_item.__unittest_asyncio_taskleaks__ = True
return test_item
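# Illustrative usage sketch (hypothetical test class) for the pieces above:
#
#   class MyTests(TestCase):                  # the asyncio-aware TestCase above
#       @ignoreTaskLeaks                      # only warn about leftover tasks
#       async def test_mocked_call(self):
#           mocked = AsyncMock(return_value=3)
#           self.assertEqual(await mocked(), 3)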
|
py | b40966efff7e87bbd8c1825f8338083679b4b821 | import pytest
import sqlite3
from rfidsecuritysvc.db import config as db
from rfidsecuritysvc.exception import DuplicateConfigError as Duplicate
from rfidsecuritysvc.exception import ConfigNotFoundError as NotFound
def test_get(mockdb):
mockdb.add_execute('SELECT * FROM config WHERE key = ?', ('test',), 'test')
assert db.get('test') == 'test'
def test_list(mockdb):
mockdb.add_execute('SELECT * FROM config ORDER BY key', cursor_return=[])
assert db.list() == []
def test_create(mockdb):
mockdb.add_execute('INSERT INTO config (key, value) VALUES (?,?)', ('test', 1))
mockdb.add_commit()
assert db.create('test', 1) is None
def test_create_IntegrityError(mockdb):
mockdb.add_execute('INSERT INTO config (key, value) VALUES (?,?)', ('test', 1))
mockdb.add_commit(sqlite3.IntegrityError)
mockdb.add_rollback()
with pytest.raises(Duplicate) as e:
db.create('test', 1)
assert type(e.value.__cause__) == sqlite3.IntegrityError
def test_delete(mockdb):
mockdb.add_execute('DELETE FROM config WHERE key = ?', ('test',), rowcount=1)
mockdb.add_commit()
assert db.delete('test') == 1
def test_update(mockdb):
mockdb.add_execute('UPDATE config SET value = ? WHERE key = ?', (1, 'test'), rowcount=1)
mockdb.add_commit()
assert db.update('test', 1) == 1
def test_update_NotFoundError(mockdb):
mockdb.add_execute('UPDATE config SET value = ? WHERE key = ?', (1, 'test'), rowcount=0)
with pytest.raises(NotFound):
db.update('test', 1)
|
py | b40967021925db8e2ea19b3c628cd3ce530e1b2c | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Self-organizing map
# Copyright (C) 2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import weave
def fromdistance(fn, shape, center=None, dtype=float):
def distance(*args):
d = 0
for i in range(len(shape)):
d += ((args[i]-center[i])/float(max(1,shape[i]-1)))**2
return np.sqrt(d)/np.sqrt(len(shape))
    if center is None:
center = np.array(list(shape))//2
return fn(np.fromfunction(distance,shape,dtype=dtype))
def Gaussian(shape,center,sigma=0.5):
''' '''
def g(x):
return np.exp(-x**2/sigma**2)
return fromdistance(g,shape,center)
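# Worked example (illustrative): Gaussian((5, 5), (2, 2), sigma=0.5) peaks at
# 1.0 in the centre; a corner such as (0, 0) lies at normalized distance
# d = sqrt(((0-2)/4.)**2 + ((0-2)/4.)**2) / sqrt(2) = 0.5, giving
# exp(-d**2 / sigma**2) = exp(-1) ~ 0.37.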
def blitz_gaussian(shape, center, sigma=0.5):
result = np.zeros(shape)
w,h = shape
x, y = center
code = """
int i, j;
double dx,dy;
double d,g;
for(i=0;i<w;i++)
{
for(j=0;j<h;j++)
{
dx = abs((double)i-(double)x);
dy = abs((double)j-(double)y);
if(dx>w/2)
dx = w - dx;
if(dy>h/2)
dy = h - dy;
dx /= float(w);
dy /= float(h);
d = sqrt(dx*dx+dy*dy);
g = exp(-d*d/(sigma*sigma));
result(i,j) = g;
}
}
"""
err = weave.inline(code,
['result', 'w', 'h', 'x', 'y', 'sigma'],
type_converters=weave.converters.blitz,
compiler = 'gcc')
return result
class SOM:
''' Self-organizing map '''
def __init__(self, *args):
''' Initialize som '''
self.codebook = np.zeros(args)
# for interpolation
self.meshgrid = np.meshgrid(np.linspace(0,1,args[0]), np.linspace(0,1,args[1]))
self.reset()
def reset(self):
''' Reset weights '''
self.codebook = np.random.random(self.codebook.shape)
def score(self, sample, width=1.0):
''' score a sample '''
D = ((self.codebook-sample)**2).sum(axis=-1)
return np.exp(-(D.reshape(self.codebook.shape[0:2]))**2/(2*width**2))
def interpolate(self, sample, width=1.0):
''' interpolate a sample '''
D = ((self.codebook-sample)**2).sum(axis=-1)
weights = np.exp(-(D.reshape(self.codebook.shape[0:2]))**2/(2*width**2))
weights = weights / np.sum(weights)
return np.stack([self.meshgrid[0]/weights, self.meshgrid[1]/weights])
def classify(self, sample):
''' classify a sample '''
D = ((self.codebook-sample)**2).sum(axis=-1)
winner = np.unravel_index(np.argmin(D), D.shape)
return winner
def density(self, samples):
w, h, d = self.codebook.shape
density = np.zeros((w,h))
for sample in samples:
D = ((self.codebook-sample)**2).sum(axis=-1)
winner = np.unravel_index(np.argmin(D), D.shape)
density[winner] += 1
return density
def get_nearest(self, x, y, data):
print x,y
D = ((self.codebook[x,y,:]-data)**2).sum(axis=-1)
winner = np.argmin(D)
return data[winner,:], winner
def get_n_nearest_indices(self, x, y, data):
D = ((self.codebook[x,y,:]-data)**2).sum(axis=-1)
winner = np.argsort(D)
return winner, D
def learn(self, samples, epochs=10000, sigma=(10, 0.001), lrate=(0.5,0.005)):
''' Learn samples '''
sigma_i, sigma_f = sigma
lrate_i, lrate_f = lrate
lrate = lrate_i
sigma = sigma_i
s = samples.shape[0]
for i in range(epochs):
if i%500==0:
print "Epoch \t %d /\t %d \tLrate:%.2f\t Sigma:%.2f" % (i, epochs, lrate, sigma)
# Adjust learning rate and neighborhood
t = i/float(epochs)
lrate = lrate_i*(lrate_f/float(lrate_i))**t
sigma = sigma_i*(sigma_f/float(sigma_i))**t
# Get random sample
index = np.random.randint(0,s)
data = samples[index]
# Get index of nearest node (minimum distance)
D = ((self.codebook-data)**2).sum(axis=-1)
winner = np.unravel_index(np.argmin(D), D.shape)
# Generate a Gaussian centered on winner
G = Gaussian(D.shape, winner, sigma)
G = np.nan_to_num(G)
# Move nodes towards sample according to Gaussian
delta = self.codebook-data
for i in range(self.codebook.shape[-1]):
self.codebook[...,i] -= lrate * G * delta[...,i]
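# Worked example (illustrative) of the decay schedule in SOM.learn above: with
# lrate=(0.5, 0.005) and epochs=10000, at t = i/epochs = 0.5 the learning rate
# is 0.5 * (0.005/0.5)**0.5 = 0.05; sigma follows the same geometric decay from
# sigma_i down to sigma_f.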
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import pylab
pylab.figure(1)
g = Gaussian((20,10), (1,5), 1.0)
pylab.imshow(g)
pylab.figure(2)
g = blitz_gaussian((20,10), (1,8), 1.0)
pylab.imshow(g)
pylab.show()
import matplotlib
import matplotlib.pyplot as plt
try: from voronoi import voronoi
except: voronoi = None
def learn(network, samples, epochs=25000, sigma=(10, 0.01), lrate=(0.5,0.005)):
network.learn(samples, epochs)
fig = plt.figure(figsize=(10,10))
axes = fig.add_subplot(1,1,1)
# Draw samples
x,y = samples[:,0], samples[:,1]
plt.scatter(x, y, s=1.0, color='b', alpha=0.1, zorder=1)
# Draw network
x,y = network.codebook[...,0], network.codebook[...,1]
if len(network.codebook.shape) > 2:
for i in range(network.codebook.shape[0]):
plt.plot (x[i,:], y[i,:], 'k', alpha=0.85, lw=1.5, zorder=2)
for i in range(network.codebook.shape[1]):
plt.plot (x[:,i], y[:,i], 'k', alpha=0.85, lw=1.5, zorder=2)
else:
plt.plot (x, y, 'k', alpha=0.85, lw=1.5, zorder=2)
plt.scatter (x, y, s=50, c='w', edgecolors='k', zorder=3)
if voronoi is not None:
segments = voronoi(x.ravel(),y.ravel())
lines = matplotlib.collections.LineCollection(segments, color='0.65')
axes.add_collection(lines)
plt.axis([0,1,0,1])
plt.xticks([]), plt.yticks([])
plt.show()
# Example 1: 2d uniform distribution (1d)
# -------------------------------------------------------------------------
print 'One-dimensional SOM over two-dimensional uniform square'
som = SOM(100,2)
samples = np.random.random((10000,2))
learn(som, samples)
# Example 2: 2d uniform distribution (2d)
# -------------------------------------------------------------------------
print 'Two-dimensional SOM over two-dimensional uniform square'
som = SOM(10,10,2)
samples = np.random.random((10000,2))
learn(som, samples)
# Example 3: 2d non-uniform distribution (2d)
# -------------------------------------------------------------------------
print 'Two-dimensional SOM over two-dimensional non-uniform disc'
som = SOM(10,10,2)
samples = np.random.normal(loc=.5, scale=.2,size=(10000,2))
learn(som, samples)
# Example 4: 2d non-uniform disc distribution (2d)
# -------------------------------------------------------------------------
print 'Two-dimensional SOM over two-dimensional non-uniform ring'
som = SOM(10,10,2)
angles = np.random.random(10000)*2*np.pi
radius = 0.25+np.random.random(10000)*.25
samples = np.zeros((10000,2))
samples[:,0] = 0.5+radius*np.cos(angles)
samples[:,1] = 0.5+radius*np.sin(angles)
learn(som, samples)
|
py | b40967377935b425fe238818e693f7456625bd59 | import os
import pickle
import numpy as np
import pandas as pd
import random
import torch
import math
from torch.autograd import Variable
from helper import *
class DataLoader():
def __init__(self,f_prefix, batch_size=4, seq_length=10, num_of_validation = 0, forcePreProcess=False, infer=False, generate = False):
'''
Initialiser function for the DataLoader class
params:
batch_size : Size of the mini-batch
seq_length : Sequence length to be considered
num_of_validation : number of validation dataset will be used
infer : flag for test mode
generate : flag for data generation mode
forcePreProcess : Flag to forcefully preprocess the data again from csv files
'''
# test_files_path = 'data/test/IRVLab/'
# train_files_path = 'data/train/IRVLab/'
# for (_, _, filenames) in os.walk(test_files_path):
# base_test_dataset = filenames
# break
# for (_, _, filenames) in os.walk(train_files_path):
# base_train_dataset = filenames
# base test files
base_test_dataset= [ '/mnt/data1/vdd_optical_flow_labels/test/IRVLab/pool_swimmer_004_A.txt'#pool_flipper_001_A.txt'#, '/data/test/IRVLab/pool_flipper_003_A_0009.txt'#/data/test/biwi/biwi_eth.txt',
# '/data/test/crowds/crowds_zara01.txt',
# '/data/test/crowds/uni_examples.txt',
# '/data/test/stanford/coupa_0.txt',
# '/data/test/stanford/coupa_1.txt', '/data/test/stanford/gates_2.txt','/data/test/stanford/hyang_0.txt','/data/test/stanford/hyang_1.txt','/data/test/stanford/hyang_3.txt','/data/test/stanford/hyang_8.txt',
# '/data/test/stanford/little_0.txt','/data/test/stanford/little_1.txt','/data/test/stanford/little_2.txt','/data/test/stanford/little_3.txt','/data/test/stanford/nexus_5.txt','/data/test/stanford/nexus_6.txt',
# '/data/test/stanford/quad_0.txt','/data/test/stanford/quad_1.txt','/data/test/stanford/quad_2.txt','/data/test/stanford/quad_3.txt'
]
#base train files
base_train_dataset = ['/mnt/data1/vdd_optical_flow_labels/test/IRVLab/pool_swimmer_001_A.txt'#pool_flipper_003_A_0001.txt'#, '/data/test/IRVLab/pool_flipper_003_A_0004.txt'#/data/train/biwi/biwi_hotel.txt',
#'/data/train/crowds/arxiepiskopi1.txt','/data/train/crowds/crowds_zara02.txt',
#'/data/train/crowds/crowds_zara03.txt','/data/train/crowds/students001.txt','/data/train/crowds/students003.txt',
#'/data/train/mot/PETS09-S2L1.txt',
#'/data/train/stanford/bookstore_0.txt','/data/train/stanford/bookstore_1.txt','/data/train/stanford/bookstore_2.txt','/data/train/stanford/bookstore_3.txt','/data/train/stanford/coupa_3.txt','/data/train/stanford/deathCircle_0.txt','/data/train/stanford/deathCircle_1.txt','/data/train/stanford/deathCircle_2.txt','/data/train/stanford/deathCircle_3.txt',
#'/data/train/stanford/deathCircle_4.txt','/data/train/stanford/gates_0.txt','/data/train/stanford/gates_1.txt','/data/train/stanford/gates_3.txt','/data/train/stanford/gates_4.txt','/data/train/stanford/gates_5.txt','/data/train/stanford/gates_6.txt','/data/train/stanford/gates_7.txt','/data/train/stanford/gates_8.txt','/data/train/stanford/hyang_4.txt',
#'/data/train/stanford/hyang_5.txt','/data/train/stanford/hyang_6.txt','/data/train/stanford/hyang_9.txt','/data/train/stanford/nexus_0.txt','/data/train/stanford/nexus_1.txt','/data/train/stanford/nexus_2.txt','/data/train/stanford/nexus_3.txt','/data/train/stanford/nexus_4.txt','/data/train/stanford/nexus_7.txt','/data/train/stanford/nexus_8.txt','/data/train/stanford/nexus_9.txt'
]
# dimensions of each file set
self.dataset_dimensions = {'IRVLab':[320,240]}
self.obs_length = 5
# List of data directories where raw data resides
self.base_train_path = '/mnt/data1/vdd_optical_flow_labels/train/'
self.base_test_path = '/mnt/data1/vdd_optical_flow_labels/test/'
self.base_validation_path = '/mnt/data1/vdd_optical_flow_labels/valid/'
# check infer flag, if true choose test directory as base directory
if infer is False:
self.base_data_dirs = base_train_dataset
else:
self.base_data_dirs = base_test_dataset
# get all files using python os and base directories
self.train_dataset = self.get_dataset_path(self.base_train_path, f_prefix)
self.test_dataset = self.get_dataset_path(self.base_test_path, f_prefix)
self.validation_dataset = self.get_dataset_path(self.base_validation_path, f_prefix)
# if generate mode, use directly train base files
if generate:
self.train_dataset = [os.path.join(f_prefix, dataset[1:]) for dataset in base_train_dataset]
#request of use of validation dataset
if num_of_validation>0:
self.additional_validation = True
else:
self.additional_validation = False
        # check validation dataset availability and clip the requested number if it is bigger than the number of available validation datasets
if self.additional_validation:
if len(self.validation_dataset) == 0:
print("There is no validation dataset.Aborted.")
self.additional_validation = False
else:
num_of_validation = np.clip(num_of_validation, 0, len(self.validation_dataset))
self.validation_dataset = random.sample(self.validation_dataset, num_of_validation)
# if not infer mode, use train dataset
if infer is False:
self.data_dirs = self.train_dataset
else:
# use validation dataset
if self.additional_validation:
self.data_dirs = self.validation_dataset
# use test dataset
else:
self.data_dirs = self.test_dataset
self.infer = infer
self.generate = generate
# Number of datasets
self.numDatasets = len(self.data_dirs)
        # array for keeping target ped ids for each sequence
self.target_ids = []
# Data directory where the pre-processed pickle file resides
self.train_data_dir = os.path.join(f_prefix, self.base_train_path)
self.test_data_dir = os.path.join(f_prefix, self.base_test_path)
self.val_data_dir = os.path.join(f_prefix, self.base_validation_path)
# Store the arguments
self.batch_size = batch_size
self.seq_length = seq_length
self.orig_seq_lenght = seq_length
# Validation arguments
self.val_fraction = 0
# Define the path in which the process data would be stored
self.data_file_tr = os.path.join(self.train_data_dir, "trajectories_train.cpkl")
self.data_file_te = os.path.join(self.base_test_path, "trajectories_test.cpkl")
self.data_file_vl = os.path.join(self.val_data_dir, "trajectories_val.cpkl")
# for creating a dict key: folder names, values: files in this folder
self.create_folder_file_dict()
if self.additional_validation:
# If the file doesn't exist or forcePreProcess is true
if not(os.path.exists(self.data_file_vl)) or forcePreProcess:
print("Creating pre-processed validation data from raw data")
# Preprocess the data from the csv files of the datasets
# Note that this data is processed in frames
self.frame_preprocess(self.validation_dataset, self.data_file_vl, self.additional_validation)
if self.infer:
# if infer mode, and no additional files -> test preprocessing
if not self.additional_validation:
if not(os.path.exists(self.data_file_te)) or forcePreProcess:
print("Creating pre-processed test data from raw data")
# Preprocess the data from the csv files of the datasets
# Note that this data is processed in frames
print("Working on directory: ", self.data_file_te)
self.frame_preprocess(self.data_dirs, self.data_file_te)
# if infer mode, and there are additional validation files -> validation dataset visualization
else:
print("Validation visualization file will be created")
# if not infer mode
else:
# If the file doesn't exist or forcePreProcess is true -> training pre-process
if not(os.path.exists(self.data_file_tr)) or forcePreProcess:
print("Creating pre-processed training data from raw data")
# Preprocess the data from the csv files of the datasets
# Note that this data is processed in frames
self.frame_preprocess(self.data_dirs, self.data_file_tr)
if self.infer:
# Load the processed data from the pickle file
if not self.additional_validation: #test mode
#print("Called test")
self.load_preprocessed(self.data_file_te)
else: # validation mode
#print("Called validation")
self.load_preprocessed(self.data_file_vl, True)
else: # training mode
#print("Called train")
self.load_preprocessed(self.data_file_tr)
# Reset all the data pointers of the dataloader object
self.reset_batch_pointer(valid=False)
self.reset_batch_pointer(valid=True)
def frame_preprocess(self, data_dirs, data_file, validation_set = False):
'''
Function that will pre-process the pixel_pos.csv files of each dataset
into data with occupancy grid that can be used
params:
data_dirs : List of directories where raw data resides
data_file : The file into which all the pre-processed data needs to be stored
validation_set: true when a dataset is in validation set
'''
# all_frame_data would be a list of list of numpy arrays corresponding to each dataset
# Each numpy array will correspond to a frame and would be of size (numPeds, 3) each row
# containing pedID, x, y
all_frame_data = []
# Validation frame data
valid_frame_data = []
# frameList_data would be a list of lists corresponding to each dataset
# Each list would contain the frameIds of all the frames in the dataset
frameList_data = []
valid_numPeds_data= []
# numPeds_data would be a list of lists corresponding to each dataset
# Each list would contain the number of pedestrians in each frame in the dataset
numPeds_data = []
#each list includes ped ids of this frame
pedsList_data = []
valid_pedsList_data = []
# target ped ids for each sequence
target_ids = []
orig_data = []
# Index of the current dataset
dataset_index = 0
# For each dataset
for directory in data_dirs:
# Load the data from the txt file
print("Now processing: ", directory)
column_names = ['frame_num','ped_id','xmin','ymin','xmax','ymax', 'xFlow', 'yFlow']
# if training mode, read train file to pandas dataframe and process
if self.infer is False:
df = pd.read_csv(directory, dtype={'frame_num':'int','ped_id':'int', 'xFlow':'float', 'yFlow':'float'}, delimiter = ' ', header=None, names=column_names)
self.target_ids = np.array(df.drop_duplicates(subset={'ped_id'}, keep='first', inplace=False)['ped_id'])
else:
# if validation mode, read validation file to pandas dataframe and process
if self.additional_validation:
df = pd.read_csv(directory, dtype={'frame_num':'int','ped_id':'int', 'xFlow':'float', 'yFlow':'float' }, delimiter = ' ', header=None, names=column_names)
self.target_ids = np.array(df.drop_duplicates(subset={'ped_id'}, keep='first', inplace=False)['ped_id'])
# if test mode, read test file to pandas dataframe and process
else:
column_names = ['frame_num','ped_id','xmin','ymin','xmax','ymax','xFlow', 'yFlow']
df = pd.read_csv(directory, dtype={'frame_num':'int','ped_id':'int', 'xFlow':'float', 'yFlow':'float'}, delimiter = ' ', header=None, names=column_names, converters = {c:lambda x: float('nan') if x == '?' else float(x) for c in ['xmin','ymin','xmax','ymax']})
#self.target_ids = np.array(df[df['ymin'].isnull()].drop_duplicates(subset={'ped_id'}, keep='first', inplace=False)['ped_id'])
self.target_ids = np.array(df.drop_duplicates(subset={'ped_id'}, keep='first', inplace=False)['ped_id'])
# convert pandas -> numpy array
data = np.array(df)
#print("DATA",data)
# keep original copy of file
orig_data.append(data)
#swap x and y points (in txt file it is like -> y,x)
data = np.swapaxes(data,0,1)
frameList = []
# get frame numbers
for frameNum in data[0,:].tolist():
if frameNum not in frameList:
frameList.append(frameNum)
#frameList = data[0, :]
# Number of frames
numFrames = len(frameList)
#print("FRAME NUMS",numFrames)
# Add the list of frameIDs to the frameList_data
frameList_data.append(frameList)
            # Initialize the list of numPeds for the current dataset
            numPeds_data.append([])
            valid_numPeds_data.append([])
# Initialize the list of numpy arrays for the current dataset
all_frame_data.append([])
# Initialize the list of numpy arrays for the current dataset
valid_frame_data.append([])
# list of peds for each frame
pedsList_data.append([])
valid_pedsList_data.append([])
target_ids.append(self.target_ids)
for ind, frame in enumerate(frameList):
# Extract all pedestrians in current frame
pedsInFrame = data[: , data[0, :] == frame]
#print("peds in %d: %s"%(frame,str(pedsInFrame)))
# Extract peds list
pedsList = pedsInFrame[1, :].tolist()
# Add number of peds in the current frame to the stored data
# Initialize the row of the numpy array
pedsWithPos = []
# For each ped in the current frame
for ped in pedsList:
# Extract their x and y positions
current_xmin = pedsInFrame[2, pedsInFrame[1, :] == ped][0]
current_ymin = pedsInFrame[3, pedsInFrame[1, :] == ped][0]
current_xmax = pedsInFrame[4, pedsInFrame[1, :] == ped][0]
current_ymax = pedsInFrame[5, pedsInFrame[1, :] == ped][0]
xFlow = pedsInFrame[6, pedsInFrame[1, :] == ped][0]
yFlow = pedsInFrame[7, pedsInFrame[1, :] == ped][0]
#print(current_xmin,current_ymin,current_xmax,current_ymax)
# Add their pedID, x, y to the row of the numpy array
pedsWithPos.append([ped, current_xmin, current_ymin, current_xmax, current_ymax, xFlow, yFlow])
# At inference time, data generation and if dataset is a validation dataset, no validation data
if (ind >= numFrames * self.val_fraction) or (self.infer) or (self.generate) or (validation_set):
# Add the details of all the peds in the current frame to all_frame_data
all_frame_data[dataset_index].append(np.array(pedsWithPos))
pedsList_data[dataset_index].append(pedsList)
numPeds_data[dataset_index].append(len(pedsList))
else:
valid_frame_data[dataset_index].append(np.array(pedsWithPos))
valid_pedsList_data[dataset_index].append(pedsList)
valid_numPeds_data[dataset_index].append(len(pedsList))
dataset_index += 1
# Save the arrays in the pickle file
f = open(data_file, "wb")
pickle.dump((all_frame_data, frameList_data, numPeds_data, valid_numPeds_data, valid_frame_data, pedsList_data, valid_pedsList_data, target_ids, orig_data), f, protocol=2)
f.close()
#print("Done here")
def load_preprocessed(self, data_file, validation_set = False):
'''
Function to load the pre-processed data into the DataLoader object
params:
data_file : the path to the pickled data file
validation_set : flag for validation dataset
'''
# Load data from the pickled file
if(validation_set):
print("Loading validaton datasets: ", data_file)
else:
print("Loading train or test dataset: ", data_file)
print("DATA FILE************",data_file)
f = open(data_file, 'rb')
self.raw_data = pickle.load(f)
#print(len(self.raw_data))
f.close()
# Get all the data from the pickle file
self.data = self.raw_data[0]
self.frameList = self.raw_data[1]
self.numPedsList = self.raw_data[2]
self.valid_numPedsList = self.raw_data[3]
self.valid_data = self.raw_data[4]
self.pedsList = self.raw_data[5]
self.valid_pedsList = self.raw_data[6]
self.target_ids = self.raw_data[7]
self.orig_data = self.raw_data[8]
counter = 0
valid_counter = 0
print('Sequence size(frame) ------>',self.seq_length)
print('One batch size (frame)--->-', self.batch_size*self.seq_length)
# For each dataset
for dataset in range(len(self.data)):
# get the frame data for the current dataset
all_frame_data = self.data[dataset]
valid_frame_data = self.valid_data[dataset]
dataset_name = self.data_dirs[dataset].split('/')[-1]
# calculate number of sequence
num_seq_in_dataset = int(len(all_frame_data) / (self.seq_length))
num_valid_seq_in_dataset = int(len(valid_frame_data) / (self.seq_length))
if not validation_set:
print('Training data from training dataset(name, # frame, #sequence)--> ', dataset_name, ':', len(all_frame_data),':', (num_seq_in_dataset))
print('Validation data from training dataset(name, # frame, #sequence)--> ', dataset_name, ':', len(valid_frame_data),':', (num_valid_seq_in_dataset))
else:
print('Validation data from validation dataset(name, # frame, #sequence)--> ', dataset_name, ':', len(all_frame_data),':', (num_seq_in_dataset))
# Increment the counter with the number of sequences in the current dataset
counter += num_seq_in_dataset
valid_counter += num_valid_seq_in_dataset
# Calculate the number of batches
self.num_batches = int(counter/self.batch_size)
# print('***************************************')
# print('Num Batches',self.num_batches)
# print('Counter',counter)
# print('Batch Size',self.batch_size)
# print('***************************************')
self.valid_num_batches = int(valid_counter/self.batch_size)
if not validation_set:
print('Total number of training batches:', self.num_batches)
print('Total number of validation batches:', self.valid_num_batches)
else:
print('Total number of validation batches:', self.num_batches)
# self.valid_num_batches = self.valid_num_batches * 2
def adjustFramesForOpticalFlow(self, seqFrames):
"""
seqFrames: Shape (sequenceLength, numDivers, 6)
return transformed frames (sequenceLength, numDivers,4)
"""
# Forward transform for observations
obsFrames = seqFrames[:self.obs_length]
orig_boxes_obs = obsFrames[:,:,0:4]
transform_observed = np.reshape(obsFrames[1:,0,4:], (orig_boxes_obs.shape[0]-1,2))
obs_transformed = self.transformedBoxes(orig_boxes_obs, transform_observed)
# Inverse transform for predictions
predFrames = np.flip(seqFrames[self.obs_length-1:], axis=(0))
# Change the sign of the transforms
predFrames[:,:,4:] *= -1.0
predBoxes = predFrames[:,:,0:4]
transform_pred = np.reshape(predFrames[1:,0,4:], (predBoxes.shape[0]-1,2))
pred_transformed = self.transformedBoxes(predBoxes, transform_pred)
predRev = np.flip(pred_transformed, axis=(0))
# Join the two together
finalTransforms = np.zeros((seqFrames.shape[0], seqFrames.shape[1], 4), dtype=float)
finalTransforms[:self.obs_length] = obs_transformed[:,:,:]
finalTransforms[self.obs_length:] = predRev[1:,:,:]
return finalTransforms
def transformedBoxes(self, orig_boxes, transforms):
"""
        Returns the boxes as seen from the frame of reference of the last frame.
        orig_boxes: original bounding boxes - each in its own frame of reference. Shape (numFrames, numDivers, 4)
        transforms: each transform gives the deviation of a frame with respect to the previous frame. Shape (numFrames - 1, 2)
"""
transformedFrames = np.zeros_like(orig_boxes, dtype=float)
currIdx = transforms.shape[0] - 1
currDeltaSum = np.zeros((2), dtype=float)
# Last frame unchanged
transformedFrames[currIdx+1,:,:] = orig_boxes[-1,:,:]
while currIdx >= 0:
currDeltaSum += transforms[currIdx]
concatenatedTransform = np.concatenate((currDeltaSum, currDeltaSum))
for i in range(orig_boxes.shape[1]):
if (False in (orig_boxes[currIdx,i] == 0)):
transformedFrames[currIdx, i] = orig_boxes[currIdx,i] + concatenatedTransform
#transformedFrames[currIdx,:] = orig_boxes[currIdx,:] + concatenatedTransform
currIdx -= 1
return transformedFrames
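    # Worked example (illustrative, hypothetical numbers): with two frames,
    # orig_boxes[0, 0] = [10, 10, 20, 20], orig_boxes[1, 0] = [12, 9, 22, 19]
    # and a single camera-motion delta transforms[0] = [2, -1], the last frame
    # is kept as-is and the earlier box is shifted by the accumulated delta to
    # [12, 9, 22, 19], so a diver that only appears to move because of camera
    # motion becomes stationary in the last frame's reference.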
def next_batch(self):
'''
Function to get the next batch of points
'''
# Source data
x_batch = []
# Target data
y_batch = []
# Dataset data
d = []
# pedlist per sequence
numPedsList_batch = []
# pedlist per sequence
PedsList_batch = []
#return target_id
target_ids = []
# Iteration index
i = 0
#print("BATCH_SIZE",self.batch_size)
while i < self.batch_size:
# Extract the frame data of the current dataset
#print("Getting batch from",self.get_file_name())
frame_data = self.data[self.dataset_pointer]
numPedsList = self.numPedsList[self.dataset_pointer]
pedsList = self.pedsList[self.dataset_pointer]
# Get the frame pointer for the current dataset
idx = self.frame_pointer
# While there is still seq_length number of frames left in the current dataset
# Instead of returning an x array of seq length, we will return an array of len seq_length + 1
# of this, x[:-1] becomes the training instance, x[1:] becomes the target
# That is, the output sequence is expected to be the future prediction of the input sequence
if idx + self.seq_length <= len(frame_data):
# All the data in this sequence
seq_source_frame_data = frame_data[idx:idx+self.seq_length]
seq_numPedsList = numPedsList[idx:idx+self.seq_length]
seq_PedsList = pedsList[idx:idx+self.seq_length]
seq_target_frame_data = frame_data[idx+1:idx+self.seq_length+1]
# Number of unique peds in this sequence of frames
x_batch.append(seq_source_frame_data)
y_batch.append(seq_target_frame_data)
numPedsList_batch.append(seq_numPedsList)
PedsList_batch.append(seq_PedsList)
# get correct target ped id for the sequence
#print("******************")
#print(self.frame_pointer,self.seq_length)
#print("TARGET_IDS PRIVATE",self.target_ids)
#print(self.target_ids[self.dataset_pointer][math.floor((self.frame_pointer)/self.seq_length)])
#target_ids.append(self.target_ids[self.dataset_pointer][math.floor((self.frame_pointer)/self.seq_length)])
self.frame_pointer += (self.seq_length)
d.append(self.dataset_pointer)
i += 1
else:
# Not enough frames left
# Increment the dataset pointer and set the frame_pointer to zero
#print("Ticking batch")
self.tick_batch_pointer(valid=False)
#print("TARGET IDS IN NEXT BATCH",target_ids)
#pedSeq = []
#for pedSeq in PedsList_batch
return np.array(x_batch), y_batch, d, numPedsList_batch, np.array(PedsList_batch), target_ids
def next_valid_batch(self):
'''
Function to get the next Validation batch of points
'''
# Source data
x_batch = []
# Target data
y_batch = []
# Dataset data
d = []
# pedlist per sequence
numPedsList_batch = []
# pedlist per sequence
PedsList_batch = []
target_ids = []
# Iteration index
i = 0
while i < self.batch_size:
# Extract the frame data of the current dataset
frame_data = self.valid_data[self.valid_dataset_pointer]
numPedsList = self.valid_numPedsList[self.valid_dataset_pointer]
pedsList = self.valid_pedsList[self.valid_dataset_pointer]
# Get the frame pointer for the current dataset
idx = self.valid_frame_pointer
# While there is still seq_length number of frames left in the current dataset
if idx + self.seq_length < len(frame_data):
# All the data in this sequence
# seq_frame_data = frame_data[idx:idx+self.seq_length+1]
seq_source_frame_data = frame_data[idx:idx+self.seq_length]
seq_numPedsList=numPedsList[idx:idx+self.seq_length]
seq_PedsList = pedsList[idx:idx+self.seq_length]
seq_target_frame_data = frame_data[idx+1:idx+self.seq_length+1]
# Number of unique peds in this sequence of frames
x_batch.append(seq_source_frame_data)
y_batch.append(seq_target_frame_data)
numPedsList_batch.append(seq_numPedsList)
PedsList_batch.append(seq_PedsList)
# get correct target ped id for the sequence
target_ids.append(self.target_ids[self.dataset_pointer][math.floor((self.valid_frame_pointer)/self.seq_length)])
self.valid_frame_pointer += self.seq_length
d.append(self.valid_dataset_pointer)
i += 1
else:
# Not enough frames left
# Increment the dataset pointer and set the frame_pointer to zero
self.tick_batch_pointer(valid=True)
return np.array(x_batch), y_batch, d, numPedsList_batch, PedsList_batch, target_ids
def tick_batch_pointer(self, valid=False):
'''
Advance the dataset pointer
'''
if not valid:
# Go to the next dataset
self.dataset_pointer += 1
# Set the frame pointer to zero for the current dataset
self.frame_pointer = 0
# If all datasets are done, then go to the first one again
if self.dataset_pointer >= len(self.data):
#print("Returning to original dataset")
self.dataset_pointer = 0
print("*******************")
print("now processing: %s"% self.get_file_name())
else:
# Go to the next dataset
self.valid_dataset_pointer += 1
# Set the frame pointer to zero for the current dataset
self.valid_frame_pointer = 0
# If all datasets are done, then go to the first one again
if self.valid_dataset_pointer >= len(self.valid_data):
self.valid_dataset_pointer = 0
print("*******************")
print("now processing: %s"% self.get_file_name(pointer_type = 'valid'))
def reset_batch_pointer(self, valid=False):
'''
Reset all pointers
'''
if not valid:
# Go to the first frame of the first dataset
self.dataset_pointer = 0
self.frame_pointer = 0
else:
self.valid_dataset_pointer = 0
self.valid_frame_pointer = 0
def switch_to_dataset_type(self, train = False, load_data = True):
# function for switching between train and validation datasets during training session
print('--------------------------------------------------------------------------')
if not train: # if train mode, switch to validation mode
if self.additional_validation:
print("Dataset type switching: training ----> validation")
self.orig_seq_lenght, self.seq_length = self.seq_length, self.orig_seq_lenght
self.data_dirs = self.validation_dataset
self.numDatasets = len(self.data_dirs)
if load_data:
self.load_preprocessed(self.data_file_vl, True)
self.reset_batch_pointer(valid=False)
else:
print("There is no validation dataset.Aborted.")
return
else:# if validation mode, switch to train mode
print("Dataset type switching: validation -----> training")
self.orig_seq_lenght, self.seq_length = self.seq_length, self.orig_seq_lenght
self.data_dirs = self.train_dataset
self.numDatasets = len(self.data_dirs)
if load_data:
self.load_preprocessed(self.data_file_tr)
self.reset_batch_pointer(valid=False)
self.reset_batch_pointer(valid=True)
def convert_proper_array(self, x_seq, num_pedlist, pedlist):
        # converter function to the appropriate format. Instead of directly using ped ids,
        # we map ped ids to array indices using a lookup table for each sequence -> speed
        # output: an array of shape (sequence length, number of unique ped ids in the sequence, num_inputs)
num_inputs = 6
#get unique ids from sequence
unique_ids = pd.unique(np.concatenate(pedlist).ravel().tolist()).astype(int)
# create a lookup table which maps ped ids -> array indices
lookup_table = dict(zip(unique_ids, range(0, len(unique_ids))))
seq_data = np.zeros(shape=(x_seq.shape[0], len(lookup_table), num_inputs))
# create new structure of array
for ind, frame in enumerate(x_seq):
#print("FRAME",frame[:,1:5])
corr_index = [lookup_table[x] for x in frame[:, 0]]
#print("CORR_INDEX",corr_index)
#print("SEQ DATA SHAPE", seq_data[ind, corr_index,:].shape)
#print("FRAME DATA SHAPE",frame[:,1:5].shape)
seq_data[ind, corr_index,:] = frame[:,1:(num_inputs+1)]
x_seq = self.adjustFramesForOpticalFlow(seq_data)
return_arr = Variable(torch.from_numpy(np.array(x_seq)).float())
return return_arr, lookup_table
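    # Worked example (illustrative): if the sequence contains ped ids
    # [4, 9, 4, 11], unique_ids is [4, 9, 11] and lookup_table becomes
    # {4: 0, 9: 1, 11: 2}, so each ped id is written to a fixed column of
    # seq_data no matter which frames it appears in.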
def add_element_to_dict(self, dict, key, value):
# helper function to add a element to dictionary
dict.setdefault(key, [])
dict[key].append(value)
def get_dataset_path(self, base_path, f_prefix):
# get all datasets from given set of directories
dataset = []
dir_names = unique_list(self.get_all_directory_namelist())
for dir_ in dir_names:
dir_path = os.path.join(f_prefix, base_path, dir_)
file_names = get_all_file_names(dir_path)
[dataset.append(os.path.join(dir_path, file_name)) for file_name in file_names]
return dataset
def get_file_name(self, offset=0, pointer_type = 'train'):
        # return the file name currently pointed to by the dataset pointer
if pointer_type == 'train':
return self.data_dirs[self.dataset_pointer+offset].split('/')[-1]
elif pointer_type == 'valid':
return self.data_dirs[self.valid_dataset_pointer+offset].split('/')[-1]
def create_folder_file_dict(self):
# create a helper dictionary folder name:file name
self.folder_file_dict = {}
for dir_ in self.base_data_dirs:
folder_name = dir_.split('/')[-2]
file_name = dir_.split('/')[-1]
self.add_element_to_dict(self.folder_file_dict, folder_name, file_name)
def get_directory_name(self, offset=0):
        # return the folder name of the file currently pointed to by the dataset pointer
folder_name = self.data_dirs[self.dataset_pointer+offset].split('/')[-2]
return folder_name
def get_directory_name_with_pointer(self, pointer_index):
# get directory name using pointer index
folder_name = self.data_dirs[pointer_index].split('/')[-2]
return folder_name
def get_all_directory_namelist(self):
#return all directory names in this collection of dataset
folder_list = [data_dir.split('/')[-2] for data_dir in (self.base_data_dirs)]
return folder_list
def get_file_path(self, base, prefix, model_name ='', offset=0):
        # return the file path of the file currently pointed to by the dataset pointer
folder_name = self.data_dirs[self.dataset_pointer+offset].split('/')[-2]
base_folder_name=os.path.join(prefix, base, model_name, folder_name)
return base_folder_name
def get_base_file_name(self, key):
# return file name using folder- file dictionary
return self.folder_file_dict[key]
def get_len_of_dataset(self):
# return the number of dataset in the mode
return len(self.data)
def clean_test_data(self, x_seq, target_id, obs_lenght, predicted_lenght):
#remove (pedid, x , y) array if x or y is nan for each frame in observed part (for test mode)
for frame_num in range(obs_lenght):
nan_elements_index = np.where(np.isnan(x_seq[frame_num][:, 4]))
try:
x_seq[frame_num] = np.delete(x_seq[frame_num], nan_elements_index[0], axis=0)
except ValueError:
print("an error has been occured")
pass
for frame_num in range(obs_lenght, obs_lenght+predicted_lenght):
nan_elements_index = x_seq[frame_num][:, 0] != target_id
try:
x_seq[frame_num] = x_seq[frame_num][~nan_elements_index]
except ValueError:
pass
def clean_ped_list(self, x_seq, pedlist_seq, target_id, obs_lenght, predicted_lenght):
# remove peds from pedlist after test cleaning
target_id_arr = [target_id]
for frame_num in range(obs_lenght+predicted_lenght):
pedlist_seq[frame_num] = x_seq[frame_num][:,0]
def write_to_file(self, data, base, f_prefix, model_name):
# write all files as txt format
self.reset_batch_pointer()
for file in range(self.numDatasets):
path = self.get_file_path(f_prefix, base, model_name, file)
file_name = self.get_file_name(file)
self.write_dataset(data[file], file_name, path)
def write_dataset(self, dataset_seq, file_name, path):
# write a file in txt format
print("Writing to file path: %s, file_name: %s"%(path, file_name))
out = np.concatenate(dataset_seq, axis = 0)
#np.savetxt(os.path.join(path, file_name), out, fmt = "%1d %1.1f %.3f %.3f", newline='\n')
np.savetxt(os.path.join(path, file_name), out, fmt = "%1d %1.1f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f", newline='\n')
def write_to_plot_file(self, data, path):
# write plot file for further visualization in pkl format
self.reset_batch_pointer()
print("Length of data:", len(data))
print("Dataloader.numDatasets:", self.numDatasets)
print("Now starting loop")
for file in range(self.numDatasets):
print("In iteration:", file)
print("Length of data currently:", len(data))
file_name = self.get_file_name(file)
file_name = file_name.split('.')[0] + '.pkl'
print("Writing to plot file path: %s, file_name: %s"%(path, file_name))
with open(os.path.join(path, file_name), 'wb') as f:
pickle.dump(data[file], f)
def get_frame_sequence(self, frame_lenght):
#print("frame pointer, frame length",self.frame_pointer, frame_lenght)
        # begin and end of predicted frame numbers in this seq.
begin_fr = (self.frame_pointer - frame_lenght)
end_fr = (self.frame_pointer)
#frame_number = self.orig_data[self.dataset_pointer][begin_fr:end_fr, 0].transpose()
frameNum = self.frameList[self.dataset_pointer][begin_fr]#.transpose()
#print("frames from %d to %d"%(frameNum, frameNum + frame_lenght - 1))
frame_number = np.reshape([float(i) for i in range(int(frameNum),int(frameNum) + frame_lenght)],(frame_lenght))
return frame_number
def get_id_sequence(self, frame_lenght):
        # begin and end of predicted frame numbers in this seq.
begin_fr = (self.frame_pointer - frame_lenght)
end_fr = (self.frame_pointer)
id_number = self.orig_data[self.dataset_pointer][begin_fr:end_fr, 1].transpose()
id_number = [int(i) for i in id_number]
return id_number
def get_dataset_dimension(self, file_name):
# return dataset dimension using dataset file name
return self.dataset_dimensions[file_name]
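# Illustrative usage sketch (hypothetical arguments and paths):
#
#   loader = DataLoader(f_prefix='.', batch_size=4, seq_length=10)
#   for b in range(loader.num_batches):
#       x, y, d, num_peds, peds_list, target_ids = loader.next_batch()
#       x_seq, lookup = loader.convert_proper_array(x[0], num_peds[0], peds_list[0])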
|
py | b4096768f81467102000cb8272cc68d0bcf020fd | # coding=utf-8
"""
Main module for HGEE (Hulixerian Game Engine Editor)
"""
__author__ = """Hossein Noroozpour"""
from ui.HGEOpenGLRenderingArea import OpenGLRenderingArea
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GLib
from ui.HGESideTab import SideTab
from core.HGEApplication import Application
class MainWindow(Gtk.Window):
"""MainWindow class for Editor"""
def __init__(self):
Gtk.Window.__init__(self, title="Hulixerian Game Engine Editor")
self.set_default_size(700, 500)
self.set_position(1)
self.m_menu_item_file = Gtk.MenuItem()
self.m_menu_item_file.set_label("_File")
self.m_menu_item_file.set_use_underline(True)
self.m_menu_item_file_new = Gtk.MenuItem()
self.m_menu_item_file_new.set_label("_New")
self.m_menu_item_file_new.set_use_underline(True)
self.m_menu_item_file_open = Gtk.MenuItem()
self.m_menu_item_file_open.set_label("_Open")
self.m_menu_item_file_open.set_use_underline(True)
self.m_menu_item_file_quit = Gtk.MenuItem()
self.m_menu_item_file_quit.set_label("_Quit")
self.m_menu_item_file_quit.set_use_underline(True)
self.m_menu_item_file_add = Gtk.MenuItem()
self.m_menu_item_file_add.set_label("_Add")
self.m_menu_item_file_add.set_use_underline(True)
self.m_menu_file = Gtk.Menu()
self.m_menu_item_file.set_submenu(self.m_menu_file)
self.m_menu_file.append(self.m_menu_item_file_new)
self.m_menu_file.append(self.m_menu_item_file_open)
self.m_menu_file.append(self.m_menu_item_file_add)
self.m_menu_file.append(self.m_menu_item_file_quit)
self.m_menu_item_edit = Gtk.MenuItem()
self.m_menu_item_edit.set_label("_Edit")
self.m_menu_item_edit.set_use_underline(True)
self.m_menu_edit = Gtk.Menu()
self.m_menu_item_edit.set_submenu(self.m_menu_edit)
self.m_menu_item_help = Gtk.MenuItem()
self.m_menu_item_help.set_label("_Help")
self.m_menu_item_help.set_use_underline(True)
self.m_menu_help = Gtk.Menu()
self.m_menu_item_help.set_submenu(self.m_menu_help)
self.m_menu_bar = Gtk.MenuBar()
self.m_menu_bar.append(self.m_menu_item_file)
self.m_menu_bar.append(self.m_menu_item_edit)
self.m_menu_bar.append(self.m_menu_item_help)
self.m_status_bar = Gtk.Statusbar()
self.m_render_area = Gtk.DrawingArea()
self.m_render_area_initialized = False
self.m_render_area.connect('configure_event', self.render_area_on_configure_event)
self.m_render_area.connect('draw', self.render_area_on_draw)
self.m_render_area.set_double_buffered(False)
self.m_render_area.set_hexpand(True)
self.m_render_area.set_vexpand(True)
m_viewport = Gtk.Grid()
m_viewport.attach(self.m_render_area, 0, 0, 1, 1)
m_viewport.set_hexpand(True)
m_viewport.set_vexpand(True)
self.m_side_tab = SideTab()
self.m_pane_main = Gtk.Paned()
self.m_pane_main.pack1(m_viewport, True, True)
self.m_pane_main.pack2(self.m_side_tab, True, True)
self.m_pane_main.set_position(500)
self.m_grid = Gtk.Grid()
self.m_grid.set_column_spacing(5)
self.m_grid.set_row_spacing(5)
self.m_grid.set_margin_top(5)
self.m_grid.set_margin_bottom(5)
self.m_grid.set_margin_left(5)
self.m_grid.set_margin_right(5)
self.add(self.m_grid)
self.m_grid.set_vexpand(True)
self.m_grid.set_hexpand(True)
self.m_grid.attach(self.m_menu_bar, 0, 0, 1, 1)
self.m_grid.attach(self.m_pane_main, 0, 1, 1, 1)
self.m_grid.attach(self.m_status_bar, 0, 2, 1, 1)
self.m_render_timeout = None
self.m_render_device = None
def initialize_render_area(self):
"""
Initialize render area.
"""
self.m_render_area_initialized = True
self.m_render_device = OpenGLRenderingArea(self.m_render_area.get_window())
self.m_render_device.set_application(Application())
self.m_render_device.set_profiler_window(self.m_side_tab.get_profiler_window())
self.m_render_timeout = GLib.timeout_add(18, self.m_render_device.render)
def render_area_on_configure_event(self, widget, event):
"""Configuring Render Area
:param event:
:param widget:
"""
if self.m_render_area_initialized:
self.m_render_device.set_size(event.width, event.height)
else:
self.initialize_render_area()
return True
def render_area_on_draw(self, widget, context):
"""
:param widget:
:param context:
:return:
"""
if self.m_render_area_initialized:
self.m_render_device.render()
else:
self.initialize_render_area()
return True
if __name__ == "__main__":
win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
py | b40968f06a635d91eb89aaae6a0187b85b3d4ca8 | """
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import nsml
from nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML
from kin_dataset import KinQueryDataset, preprocess,preprocess2,data_augmentation,preprocess_origin
from kin_model import text_clf_ensemble_model
from nsml import GPU_NUM
# DONOTCHANGE: They are reserved for nsml
# This is for nsml leaderboard
def bind_model(sess, config):
    # Function that saves the trained model.
def save(dir_name, *args):
# directory
os.makedirs(dir_name, exist_ok=True)
saver = tf.train.Saver()
saver.save(sess, os.path.join(dir_name, 'model'))
    # Function that loads a previously saved model.
def load(dir_name, *args):
saver = tf.train.Saver()
# find checkpoint
ckpt = tf.train.get_checkpoint_state(dir_name)
if ckpt and ckpt.model_checkpoint_path:
checkpoint = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join(dir_name, checkpoint))
else:
raise NotImplemented('No checkpoint!')
print('Model loaded')
def infer(raw_data, **kwargs):
"""
        :param raw_data: raw input (here, a string)
:param kwargs:
:return:
"""
        # Call the preprocess function defined in dataset.py to convert the strings into vectors
preprocessed_data1, preprocessed_data2 = preprocess2(raw_data, config.strmaxlen,test_data=False)
#preprocessed_data = preprocess_origin(raw_data, config.strmaxlen,test_data=False)
        # Feed the inputs to the saved model and get the prediction results back
infer_preds = []
for output in outputs:
infer_preds.append(sess.run(output, feed_dict={
input1: preprocessed_data1,
input2: preprocessed_data2,
is_training: False,
keep_prob:1}))
infer_pred = tf.concat(infer_preds, axis=1)
infer_pred = tf.reduce_mean(infer_pred, axis=1, keep_dims=True)
infer_pred = sess.run(infer_pred)
clipped = np.array((infer_pred) > config.threshold, dtype=np.int)
# clipped = np.array(infer_pred > config.threshold, dtype=np.int)
# DONOTCHANGE: They are reserved for nsml
        # The returned results must be in the form [(probability, 0 or 1)] to be posted to the leaderboard. The probability values do not affect the leaderboard result.
return list(zip(infer_pred.flatten(), clipped.flatten()))
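    # Worked example (illustrative): if the ensemble members output 0.9, 0.7
    # and 0.4 for a query, their mean is ~0.67 > config.threshold (0.5), so the
    # clipped prediction returned for that query is 1.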
# DONOTCHANGE: They are reserved for nsml
    # Function that makes the functions designated above accessible to nsml.
nsml.bind(save=save, load=load, infer=infer)
def data_normalization(data):
if is_data_norm:
return ((data - np.mean(data)) / np.std(data))
else:
return data
def _batch_loader(iterable, n=1):
"""
    Function that slices the data into batch-sized chunks and yields them. Plays the same role as PyTorch's DataLoader.
    :param iterable: data list, or another format
    :param n: batch size
:return:
"""
length = len(iterable)
for n_idx in range(0, length, n):
yield iterable[n_idx:min(n_idx + n, length)]
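# Illustrative sketch: _batch_loader yields consecutive slices, e.g.
# list(_batch_loader(list(range(5)), n=2)) returns [[0, 1], [2, 3], [4]].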
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
if __name__ == '__main__':
#if GPU_NUM:
# config_proto = tf.ConfigProto()
# config_proto.gpu_options.allow_growth = True
args = argparse.ArgumentParser()
# DONOTCHANGE: They are reserved for nsml
args.add_argument('--mode', type=str, default='train')
args.add_argument('--pause', type=int, default=0)
args.add_argument('--iteration', type=str, default='0')
# User options
args.add_argument('--output', type=int, default=1)
args.add_argument('--epochs', type=int, default=200)
args.add_argument('--batch', type=int, default=2000)
args.add_argument('--strmaxlen', type=int, default=400)
args.add_argument('--w2v_size',type=int, default=16)
args.add_argument('--embedding', type=int, default=8) # more bigger?
args.add_argument('--threshold', type=float, default=0.5)
args.add_argument('--lr',type=float,default=0.0005)
config = args.parse_args()
if not HAS_DATASET and not IS_ON_NSML: # It is not running on nsml
        # This path has to be changed!
DATASET_PATH = '/home/leekh7411/PycharmProject/ai-hackathon-2018/kin_phase1/sample_data/kin/'
# Parameters for model configuration
L1_INPUT = config.embedding * config.strmaxlen # 8 x 400
FIN_OUTPUT = 1
learning_rate = config.lr
learning_rate_tf = tf.placeholder(tf.float32,[],name="lr")
train_decay = 0.99
character_size = 251
w2v_size = config.w2v_size
drop_out_val = 0.8
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool)
is_train = True
is_data_norm = False
n_classes = 32
beta = 0.1
# Input & Output layer
input1 = tf.placeholder(tf.int32, [None, config.strmaxlen], name="input-x1")
input2 = tf.placeholder(tf.int32, [None, config.strmaxlen], name="input-x2")
y_ = tf.placeholder(tf.float32, [None, FIN_OUTPUT], name="output-y")
# Add models for ensemble prediction
outputs = []
ensemble_size = 10
for i in range(ensemble_size):
outputs.append(text_clf_ensemble_model(input1, input2, character_size, config.embedding, is_train, keep_prob, n_classes,i))
# Make each model's loss and optimizer(train_step)
with tf.name_scope("loss-optimizer"):
# Binary Cross Entropy
def binary_cross_entropy_loss(y_,output):
return tf.reduce_mean(-(y_ * tf.log(tf.clip_by_value(output, 1e-10, 1.0))) - (1 - y_) * tf.log(tf.clip_by_value(1 - output, 1e-10, 1.0)))
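        # Note: clipping the model output to [1e-10, 1.0] keeps both log terms
        # finite when a prediction saturates at exactly 0 or 1.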
bce_loss = []
for out in outputs:
bce_loss.append(binary_cross_entropy_loss(y_,out))
train_steps = []
for loss in bce_loss:
train_steps.append(tf.train.AdamOptimizer(learning_rate=learning_rate_tf).minimize(loss))
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# ========================================================================================#
# DONOTCHANGE: Reserved for nsml
bind_model(sess=sess, config=config)
# DONOTCHANGE: Reserved for nsml
if config.pause:
nsml.paused(scope=locals())
if config.mode == 'train':
        # Load the dataset.
dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)
dataset_len = len(dataset)
one_batch_size = dataset_len//config.batch
if dataset_len % config.batch != 0:
one_batch_size += 1
        # Run training for each epoch.
for epoch in range(config.epochs):
avg_loss = 0.0
avg_val_acc = 0.0
# Shuffle train data to prevent overfitting
s = np.random.permutation(dataset.labels.shape[0])
dataset.queries1 = dataset.queries1[s]
dataset.queries2 = dataset.queries2[s]
dataset.labels = dataset.labels[s]
#test_data1 = dataset.queries1_test
#test_data2 = dataset.queries2_test
#test_labels = dataset.labels_test
for i, (data1,data2,labels) in enumerate(_batch_loader(dataset, config.batch)):
# Divide Cross Validation Set
                # *This validation split is largely meaningless, because all the data was shuffled above
                test_idx = int(len(labels) * 0.95)
train_data1 = data1[:test_idx]
train_data2 = data2[:test_idx]
test_data1 = data1[test_idx:]
test_data2 = data2[test_idx:]
train_labels = labels[:test_idx]
test_labels = labels[test_idx:]
# Test Validation Set
# For ensemble, test each models
def predict(output,test_data1,test_data2,is_train,_keep_prob):
pred = sess.run(output, feed_dict={input1: test_data1,input2: test_data2,
is_training: is_train, keep_prob: _keep_prob})
pred_clipped = np.array(pred > config.threshold, dtype=np.float32)
return pred_clipped
preds = []
for out in outputs:
preds.append(predict(out, test_data1, test_data2, False, 1))
# concat all predicted results([0.,1.,0.,1.,..],[1.,0.,1.,...],...) <- float data
pred = tf.concat(preds,axis=1)
# sum and mean all row data
pred = tf.reduce_mean(pred,axis=1,keep_dims=True)
                # e.g. if five models predict [1,1,1,1,0] --> sum(4) --> mean(4/5) = 0.8 --> threshold(0.5) --> 1,
                # so the ensemble's result is '1'
pred = np.array(sess.run(pred) > config.threshold, dtype=np.int)
is_correct = tf.equal(pred, test_labels)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# Get Validation Loss
val_acc = accuracy.eval(
feed_dict={
input1: test_data1,input2: test_data2,
y_: test_labels,
is_training: False,
keep_prob: 1})
# Training Section
ensemble_loss = 0.
for train, bce in zip(train_steps,bce_loss):
_, loss = sess.run([train, bce],
feed_dict={
input1: data1,input2: data2,
y_: labels,
learning_rate_tf: learning_rate,
is_training: True,
keep_prob: drop_out_val
})
ensemble_loss += loss
ensemble_loss /= len(bce_loss)
nsml.report(summary=True, scope=locals(), epoch=epoch * one_batch_size + i, epoch_total=config.epochs * one_batch_size,
train__loss=float(ensemble_loss), step=epoch * one_batch_size + i)
print('Batch : ', i + 1, '/', one_batch_size, ', Batch Size:', one_batch_size ,
'BCE in this minibatch: ', float(ensemble_loss),
"Valid score:", float(val_acc) * 100,
"Learning_rate:", (learning_rate))
avg_loss += float((ensemble_loss))
avg_val_acc += float((val_acc))
print('========================================================================================')
print('epoch:', epoch, '\ntrain_loss:', float(avg_loss / (one_batch_size)),'\nvalid_acc:',
float(avg_val_acc / (one_batch_size)) * 100)
learning_rate = learning_rate * train_decay
# DONOTCHANGE (You can decide how often you want to save the model)
nsml.save(epoch)
    # Used in local test mode.
    # If the results look like the example below, you can submit them via `nsml submit`.
    # [(0.3, 0), (0.7, 1), ... ]
else:
with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
queries = f.readlines()
res = []
for batch in _batch_loader(queries, config.batch):
temp_res = nsml.infer(batch)
res += temp_res
print(res)
|
py | b4096928618720d22e12d97233cfb816244be9ac | # coding: utf-8
from django.db import models
from django.utils.encoding import smart_text, python_2_unicode_compatible
from django.utils.timezone import now
from markitup.fields import MarkupField
from .managers import JobsManager
@python_2_unicode_compatible
class Job(models.Model):
title = models.CharField(max_length=255, verbose_name='Başlık')
company = models.CharField(max_length=255, verbose_name='Şirket Adı')
url = models.URLField(max_length=255, verbose_name='Başvuru Linki')
description = MarkupField(verbose_name='Açıklama',
help_text="Markdown formatında yazabilirsiniz.")
location = models.CharField(max_length=255, verbose_name='Konum')
date_created = models.DateTimeField(default=now)
objects = JobsManager()
class Meta:
ordering = ["-date_created"]
@models.permalink
def get_absolute_url(self):
return 'jobs:detail', [self.pk]
def __str__(self):
return smart_text(self.title)
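    # Usage sketch (illustrative values only, not part of the original module;
    # JobsManager is assumed to keep the default manager API):
    #
    #   Job.objects.create(title="Backend Developer", company="Acme",
    #                      url="https://example.com/apply", location="Istanbul",
    #                      description="Markdown text goes here")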
|
py | b40969aee0741158513eb3742af3e4b92f3837fd | """
This will be used by default if pyseabreeze isn't installed. Use this if you
don't need to measure any spectra. Also useful as a minimal template for
implementing your own backends.
"""
"""
Every backend must contain a dictionary listing of its features. Spectrabuster
looks for the following features in this dictionary; a False value or the
absence of a key is interpreted as the absence of that feature:
"""
features = {
"measure": False, # Measuring the intensities spectrum
"correct_nl": False, # Correction of non-linearity
"correct_dc": False, # Correction of dark counts
"temperature": False, # Measurement of the device's temperature
"int_time_limits": False, # Return the device's integration time limits
"sat_intensity": False, # Return the device's saturation intensity
}
class Device(object):
# {{{
"""
All of the following methods and attributes are required of the Device
class.
"""
def __init__(self, *args, **kwargs):
return None
def measure(self, **kwargs):
return None
def wavelengths(self, **kwargs):
return None
def set_int_time(self, int_time, **kwargs):
return None
@property
def int_time_limits(self):
return None
@property
def sat_intensity(self):
return None
# }}}
"""
All of the following functions are required for the backend to work.
"""
def devices():
return [Device()]
def first_available_device():
return Device()
def get_name(self):
return "none"
|
py | b40969d72d6f4e9356c195f5f0379a39e5f32410 | # Copyright 2014-2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
LOG = log.getLogger(__name__)
class AlarmsV2API(object):
def __init__(self):
super(AlarmsV2API, self).__init__()
LOG.info('Initializing AlarmsV2API!')
def on_put(self, req, res, alarm_id):
res.status = '501 Not Implemented'
def on_patch(self, req, res, alarm_id):
res.status = '501 Not Implemented'
def on_delete(self, req, res, alarm_id):
res.status = '501 Not Implemented'
def on_get(self, req, res, alarm_id):
res.status = '501 Not Implemented'
class AlarmsCountV2API(object):
def __init__(self):
super(AlarmsCountV2API, self).__init__()
def on_get(self, req, res):
res.status = '501 Not Implemented'
class AlarmsStateHistoryV2API(object):
def __init__(self):
super(AlarmsStateHistoryV2API, self).__init__()
LOG.info('Initializing AlarmsStateHistoryV2API!')
def on_get(self, req, res, alarm_id):
res.status = '501 Not Implemented'
|
py | b4096a8d3bc52b84811f36173a8b5b67073a9f6f | def truncate(text):
return text[:80] + " [...]" |
py | b4096b4730b8389da4bdc62cd700f53f27de1768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from common import emit_console, COLUMN_SEPARATOR
def shuffler(stream, emit=emit_console):
"""
Shuffler.
"""
items = []
for line in stream:
data = line.strip().split(COLUMN_SEPARATOR, 1)
if len(data) != 2:
continue
key, value = data
items.append((key, value))
    items.sort(key=lambda kv: kv[0])
if emit:
for key, value in items:
emit(key, value)
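if __name__ == "__main__":
    # Usage sketch: each input line is "key<COLUMN_SEPARATOR>value"; the
    # shuffler sorts the pairs by key before handing them to the emitter
    # (emit_console, imported from common, is the default; its exact output
    # format is not shown here).
    sample = ["banana" + COLUMN_SEPARATOR + "2", "apple" + COLUMN_SEPARATOR + "1"]
    shuffler(sample)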
|
py | b4096d9d48add93125c6d13fab62926930ecc592 | from typing import Union, Any
from casadi import MX, SX, vertcat
from ..optimization.non_linear_program import NonLinearProgram
class PenaltyNodes:
"""
A placeholder for the required elements to compute a penalty (all time)
"""
def __init__(self, ocp, nlp: NonLinearProgram, t: list, x: list, u: list, p: Union[MX, SX, list]):
"""
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
nlp: NonLinearProgram
A reference to the current phase of the ocp
t: list
            Time indices, maximum value being the number of shooting points + 1
x: list
References to the state variables
u: list
References to the control variables
p: Union[MX, SX]
References to the parameter variables
"""
self.ocp: Any = ocp
self.nlp: NonLinearProgram = nlp
self.t = t
self.x = x
self.u = u
self.p = vertcat(p)
def __len__(self):
return len(self.t)
def __iter__(self):
"""
Allow for the list to be used in a for loop
Returns
-------
A reference to self
"""
self._iter_idx = 0
return self
def __next__(self):
"""
Get the next phase of the option list
Returns
-------
The next phase of the option list
"""
self._iter_idx += 1
if self._iter_idx > len(self):
raise StopIteration
return self[self._iter_idx - 1]
def __getitem__(self, item):
return PenaltyNode(self, item)
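# Usage sketch (illustrative only): given nodes = PenaltyNodes(ocp, nlp, t, x, u, p),
#   len(nodes)   -> number of time indices in t
#   nodes[i]     -> a PenaltyNode wrapping the data at shooting index i
#   for node in nodes: ...  -> iterates over PenaltyNode objects in order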
class PenaltyNode:
"""
A placeholder for the required elements to compute a penalty (single time)
"""
def __init__(self, nodes: PenaltyNodes, shooting_index: int):
"""
Parameters
----------
nodes: PenaltyNodes
The penalty node for all the time
shooting_index: int
The index of the penalty node
"""
self.ocp: Any = nodes.ocp
self.nlp: NonLinearProgram = nodes.nlp
self.t = nodes.t[shooting_index]
self.x = nodes.x[shooting_index]
self.u = nodes.u[shooting_index] if shooting_index < len(nodes.u) else None
self.p = nodes.p
|
py | b4096ec25b428ac66226506df2cd2855bff6c619 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import weakref
import numpy as np
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_spec = tpu_cluster_resolver.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
cluster_def=cluster_def,
query_topology=False))
return tpu_system_metadata
@contextlib.contextmanager
def maybe_init_scope():
if ops.executing_eagerly_outside_functions():
yield
else:
with ops.init_scope():
yield
# TODO(jhseu): Deduplicate with MirroredStrategy?
def _create_tpu_mirrored_variable( # pylint: disable=missing-docstring
strategy, device_map, logical_device, real_mirrored_creator,
*args, **kwargs):
# Figure out what collections this variable should be added to.
# We'll add the TPUMirroredVariable to those collections instead.
var_collections = kwargs.pop("collections", None)
if var_collections is None:
var_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# TODO(jhseu): Should we have different behavior for different
# synchronization settings?
# Get aggregation value
# TODO(jhseu): Support aggregation in a replica context.
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in [
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA,
]:
raise ValueError("Invalid variable aggregation mode: {} for variable: {}"
.format(aggregation, kwargs["name"]))
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
devices = device_map.logical_to_actual_devices(logical_device)
value_list = real_mirrored_creator(devices, *args, **kwargs)
result = values.TPUMirroredVariable(
strategy, device_map, value_list, aggregation,
logical_device=logical_device)
if not (context.executing_eagerly() or ops.inside_function()):
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
var_collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for v in value_list:
l.remove(v)
g.add_to_collections(var_collections, result)
return result
@tf_export("distribute.experimental.TPUStrategy", v1=[])
class TPUStrategy(distribute_lib.Strategy):
"""TPU distribution strategy implementation."""
def __init__(self,
tpu_cluster_resolver=None,
device_assignment=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster. Currently only
          supports the use case of using a single core within a TPU cluster.
"""
super(TPUStrategy, self).__init__(TPUExtended(
self, tpu_cluster_resolver, device_assignment=device_assignment))
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def experimental_run_v2(self, fn, args=(), kwargs=None):
"""See base class."""
fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())
return self.extended.tpu_run(fn, args, kwargs)
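  # Usage sketch for TPUStrategy (illustration only, not part of the original
  # source; the TPU address below is hypothetical):
  #
  #   resolver = TPUClusterResolver(tpu="grpc://10.0.0.1:8470")
  #   tf.tpu.experimental.initialize_tpu_system(resolver)
  #   strategy = TPUStrategy(resolver)
  #   with strategy.scope():
  #     ...  # build variables and models under the strategy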
@tf_export(v1=["distribute.experimental.TPUStrategy"])
class TPUStrategyV1(distribute_lib.StrategyV1):
"""TPU distribution strategy implementation."""
def __init__(self,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster. Currently only
          supports the use case of using a single core within a TPU cluster.
"""
super(TPUStrategyV1, self).__init__(TPUExtended(
self, tpu_cluster_resolver, steps_per_run, device_assignment))
@property
def steps_per_run(self):
"""DEPRECATED: use .extended.steps_per_run instead."""
return self._extended.steps_per_run
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def experimental_run_v2(self, fn, args=(), kwargs=None):
"""See base class."""
fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx())
return self.extended.tpu_run(fn, args, kwargs)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class TPUExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of TPUStrategy."""
def __init__(self,
container_strategy,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
super(TPUExtended, self).__init__(container_strategy)
if tpu_cluster_resolver is None:
tpu_cluster_resolver = TPUClusterResolver("")
if steps_per_run is None:
# TODO(frankchn): Warn when we are being used by DS/Keras and this is
# not specified.
steps_per_run = 1
self._tpu_function_cache = weakref.WeakKeyDictionary()
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
self._device_assignment = device_assignment
# Device assignment is currently only supported for 1 core case.
if self._device_assignment:
assert isinstance(self._device_assignment,
device_assignment_lib.DeviceAssignment)
if self._device_assignment.num_replicas != 1:
raise ValueError("Device assignment is only supported for a single "
"core single replica case currently.")
if self._device_assignment.num_cores_per_replica != 1:
raise ValueError("Device assignment is only supported for a single "
"core single replica case currently.")
if not all(self._device_assignment.core_assignment[0][0] == [0, 0, 0]):
raise ValueError("Device assignment is only supported for a single "
"core single replica case currently.")
# TODO(jhseu): Switch to DeviceAssignment to support pods and model
# parallelism.
self._tpu_devices = [d.name for d in self._tpu_metadata.devices
if "device:TPU:" in d.name]
self._host_device = device_util.get_host_for_device(self._tpu_devices[0])
# Only create variables for the number of replicas we're running.
self._tpu_devices = self._tpu_devices[:self._num_replicas_in_sync]
self._device_map = values.ReplicaDeviceMap(self._tpu_devices)
# Preload the data onto the TPUs.
input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices:
host_device = device_util.get_host_for_device(tpu_device)
input_worker_devices.setdefault(host_device, [])
input_worker_devices[host_device].append(tpu_device)
self._input_workers = input_lib.InputWorkers(
self._device_map, tuple(input_worker_devices.items()))
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
self.experimental_enable_get_next_as_optional = True
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate_tpu_variable(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(
input_fn,
self._input_workers,
input_contexts,
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._host_device),
session)
def _experimental_distribute_dataset(self, dataset):
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _experimental_distribute_datasets_from_function(self, dataset_fn):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.DistributedDatasetsFromFunction(
dataset_fn,
self._input_workers,
input_contexts,
self._container_strategy())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _experimental_run_steps_on_iterator(
self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def run_fn(inputs):
"""Single step on the TPU device."""
fn_result = fn(ctx, inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, for e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
def rewrite_fn(*args):
"""The rewritten step fn running on TPU."""
del args
per_replica_inputs = multi_worker_iterator.get_next()
replicate_inputs = []
for replica_id in range(self._num_replicas_in_sync):
select_replica = lambda x: values.select_replica(replica_id, x) # pylint: disable=cell-var-from-loop
replicate_inputs.append((nest.map_structure(
select_replica, per_replica_inputs),))
replicate_outputs = tpu.replicate(
run_fn, replicate_inputs, device_assignment=self._device_assignment)
# If run_fn has tensor outputs, tpu.replicate returns a list of list. We
# will flatten it in this case. If run_fn has no tensor outputs,
# tpu.replicate returns a list of no_ops, we will keep the output as it
# is.
if isinstance(replicate_outputs[0], list):
replicate_outputs = nest.flatten(replicate_outputs)
return replicate_outputs
# TODO(sourabhbajaj): The input to while loop should be based on the
# output type of the step_fn
assert isinstance(initial_loop_values, list)
initial_loop_values = initial_loop_values * self._num_replicas_in_sync
# Put the while loop op on TPU host 0.
with ops.device(self._host_device):
if self.steps_per_run == 1:
replicate_outputs = rewrite_fn()
else:
replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
initial_loop_values)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs)
if isinstance(replicate_outputs, list):
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [
x for x in replicate_outputs if not isinstance(x, ops.Operation)
]
# Outputs are currently of the structure (flattened)
# [output0_device0, output1_device0, output2_device0,
# output0_device1, output1_device1, output2_device1,
# ...]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
last_step_tensor_outputs = [
last_step_tensor_outputs[i::output_num] for i in range(output_num)
]
else:
# no tensors returned.
last_step_tensor_outputs = []
_set_last_step_outputs(ctx, last_step_tensor_outputs)
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
def _experimental_initialize_system(self):
"""Experimental method added to be used by Estimator.
This is a private method only to be used by Estimator. Other frameworks
should directly be calling `tf.tpu.experimental.initialize_tpu_system`
"""
tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
device_map = self._device_map
logical_device = 0 # TODO(josh11b): Get logical device from scope here.
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(*args, **kwargs)
else:
device_map = colocate_with.device_map
logical_device = colocate_with.logical_device
def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
initial_value = None
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
if i == 0:
initial_value = kwargs["initial_value"]
# Note: some v1 code expects variable initializer creation to happen
# inside a init_scope.
with maybe_init_scope():
initial_value = initial_value() if callable(
initial_value) else initial_value
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
kwargs["initial_value"] = initial_value
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.TPUMirroredVariable)
value_list.append(v)
return value_list
return _create_tpu_mirrored_variable(
self._container_strategy(), device_map, logical_device,
_real_mirrored_creator, *args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations):
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if reduce_op == reduce_util.ReduceOp.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_replicas_in_sync)
elif reduce_op != reduce_util.ReduceOp.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
# TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
# Always performs the reduction on the TPU host.
with ops.device(self._host_device):
output = math_ops.add_n(value.values)
if reduce_op == reduce_util.ReduceOp.MEAN:
output *= (1. / len(value.values))
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
# If necessary, copy to requested destination.
dest_canonical = device_util.canonicalize(devices[0])
host_canonical = device_util.canonicalize(self._host_device)
if dest_canonical != host_canonical:
with ops.device(dest_canonical):
output = array_ops.identity(output)
else:
output = cross_device_ops_lib.simple_broadcast(output, destinations)
return output
def _update(self, var, fn, args, kwargs, group):
assert isinstance(var, values.TPUMirroredVariable) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if group:
return fn(var, *args, **kwargs)
else:
return (fn(var, *args, **kwargs),)
# Otherwise, we revert to MirroredStrategy behavior and update each variable
# directly.
updates = []
for i, (d, v) in enumerate(zip(var.devices, var.values)):
name = "update_%d" % i
with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(fn(v,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs)))
return values.update_regroup(self, self._device_map, updates, group)
def read_var(self, var):
assert isinstance(var, values.TPUMirroredVariable) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
return var.read_value()
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
# Return in a deterministic order.
return tuple(val.get(device=d) for d in sorted(val.devices))
elif isinstance(val, list):
# TODO(josh11b): We need to remove this case; per device values should
# be represented using a PerReplica wrapper instead of a list with
# one entry per device.
return tuple(val)
elif isinstance(val, values.TPUMirroredVariable):
# pylint: disable=protected-access
if values._enclosing_tpu_context() is not None:
return (val,)
return val.values
return (val,)
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
@property
def num_hosts(self):
if self._device_assignment is None:
return self._tpu_metadata.num_hosts
return len(set([self._device_assignment.host_device(r)
for r in range(self._device_assignment.num_replicas)]))
@property
def num_replicas_per_host(self):
if self._device_assignment is None:
return self._tpu_metadata.num_of_cores_per_host
# TODO(sourabhbajaj): Remove this method we use inputs and remove infeed
# as the computation of num_replicas_per_host is not a constant
# when using device_assignment. This is a temporary workaround to support
# StatefulRNN as everything is 1 in that case.
# This method needs to take host_id as input for correct computation.
max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
self._device_assignment.num_cores_per_replica)
models_per_host = min(self._device_assignment.num_replicas,
max_models_per_host)
return models_per_host * self._device_assignment.num_cores_per_replica
@property
def _num_replicas_in_sync(self):
if self._device_assignment is None:
return self._tpu_metadata.num_cores
return (self._device_assignment.num_replicas *
self._device_assignment.num_cores_per_replica)
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return self._tpu_devices
@property
def parameter_devices(self):
return self._tpu_devices
def non_slot_devices(self, var_list):
return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._host_device), distribute_lib.UpdateContext(
self._host_device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def tpu_run(self, fn, args, kwargs):
func = self._tpu_function_creator(fn)
return func(args, kwargs)
def _tpu_function_creator(self, fn):
if fn in self._tpu_function_cache:
return self._tpu_function_cache[fn]
strategy = self._container_strategy()
def tpu_function(args, kwargs):
"""TF Function used to replicate the user computation."""
if kwargs is None:
kwargs = {}
# Remove None at the end of args as they are not replicatable
# If there are None in the middle we can't do anything about it
# so let those cases fail.
# For example when Keras model predict is used they pass the targets as
# None. We want to handle it here so all client libraries don't have to
# do this as other strategies can handle None values better.
while args and args[-1] is None:
args = args[:-1]
# Used to re-structure flattened output tensors from `tpu.replicate()`
# into a structured format.
result = [[]]
def replicated_fn(replica_id, replica_args, replica_kwargs):
"""Wraps user function to provide replica ID and `Tensor` inputs."""
with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
result[0] = fn(*replica_args, **replica_kwargs)
return result[0]
replicate_inputs = [] # By replica.
for i in range(strategy.num_replicas_in_sync):
replicate_inputs.append(
[constant_op.constant(i, dtype=dtypes.int32),
values.select_replica(i, args),
values.select_replica(i, kwargs)])
# Construct and pass `maximum_shapes` so that we could support dynamic
# shapes using dynamic padder.
if replicate_inputs:
maximum_shapes = []
flattened_list = nest.flatten(replicate_inputs[0])
for input_tensor in flattened_list:
if tensor_util.is_tensor(input_tensor):
maximum_shape = input_tensor.get_shape()
else:
maximum_shape = tensor_shape.TensorShape(np.shape(input_tensor))
maximum_shapes.append(maximum_shape)
maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
maximum_shapes)
else:
maximum_shapes = None
with strategy.scope():
replicate_outputs = tpu.replicate(
replicated_fn,
replicate_inputs,
device_assignment=self._device_assignment,
maximum_shapes=maximum_shapes)
# Remove all no ops that may have been added during 'tpu.replicate()'
if isinstance(result[0], list):
result[0] = [
output for output in result[0] if tensor_util.is_tensor(output)
]
# Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
if result[0] is None:
replicate_outputs = [None] * len(replicate_outputs)
else:
replicate_outputs = [
nest.pack_sequence_as(result[0], nest.flatten(replica_output))
for replica_output in replicate_outputs
]
device_map = self._device_map # pylint: disable=protected-access
return values.regroup(device_map, replicate_outputs)
if context.executing_eagerly():
tpu_function = def_function.function(tpu_function)
self._tpu_function_cache[fn] = tpu_function
return tpu_function
class _TPUReplicaContext(distribute_lib.ReplicaContext):
"""Replication Context class for TPU Strategy."""
# TODO(sourabhbajaj): Call for each replica should be updating this.
# TODO(b/118385803): Always properly initialize replica_id.
def __init__(self, strategy, replica_id_in_sync_group=None):
if replica_id_in_sync_group is None:
replica_id_in_sync_group = constant_op.constant(0, dtypes.int32)
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)
@property
def devices(self):
distribute_lib.require_replica_context(self)
ds = self._strategy
replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
if replica_id is None: # Non-constant `Tensor` inside `tpu.replicate`.
# TODO(cjfj): Return other devices when model parallelism is supported.
return (tpu.core(0),)
else:
return (ds.extended.worker_devices[replica_id],)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
"""Sets the last step outputs on the given context."""
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been reduced, take the first value
# from the list as each value should be the same. Else return the full
# list of values.
# TODO(josh11b): If reduce_op is NONE, we should return a PerReplica
# value.
if reduce_op is not None:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
|
py | b4096fa8efe4987a0f7a8b77d0efa0739f771947 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
__author__ = 'colddew'
import os
import shutil
def make_succulent_train_folder(root_path):
# for root, dirs, files in os.walk(root_path):
    # # print(root) # current directory path
    # # print(dirs) # all subdirectories under the current path
    # # print(files) # all non-directory files under the current path
# for f in files:
# if os.path.splitext(f)[1] == '.jpg':
# print(os.path.join(root, f))
    # Only process images directly under the root folder
if(os.path.exists(root_path)):
files = os.listdir(root_path)
for f in files:
old_path = os.path.join(root_path, f)
print old_path
if (os.path.isfile(old_path) and os.path.splitext(old_path)[1] == '.jpg'):
folder = os.path.split(old_path)[1].split('-')[0]
sub_path = mkDir(root_path, folder)
new_path = os.path.join(sub_path, f)
shutil.move(old_path, new_path)
def mkDir(root_path, folder):
dir_path = root_path + folder
exists = os.path.exists(dir_path)
if not exists:
os.makedirs(dir_path)
return dir_path
else:
return dir_path
def rename_all_succulent_train_file(root_path):
n = 0
for parent, dirnames, filenames in os.walk(root_path):
for dirname in dirnames:
# print "parent is: " + parent
# print "dirname is: " + dirname
# n += 1
# print n
rename_current_path_succulent_train_file(os.path.join(parent, dirname))
def rename_current_path_succulent_train_file(current_path):
n = 0
for parent, dirnames, filenames in os.walk(current_path):
for filename in filenames:
if filename != '.DS_Store':
# print "parent is: " + parent
# print "filename is: " + filename
old_path = os.path.join(parent, filename)
print old_path
n += 1
new_file_name = os.path.split(parent)[1] + '-' + str(n) + '.jpg'
new_path = os.path.join(parent, new_file_name)
print new_path
shutil.move(old_path, new_path)
# make_succulent_train_folder('/Users/anmy/Downloads/pic/succulent-train/')
# rename_all_succulent_train_file('/Users/anmy/Downloads/pic/succulent-train/')
|
py | b409708f3502d591172d80e9c54181b268f3ac50 | from pathlib import Path
from music21 import *
p = Path('/Users/Cuthbert/Desktop/Norman_Schmidt_Chorales')
pOut = p.parent / 'Out_Chorales'
def run():
files = list(p.iterdir())
filenames = [fp.name for fp in files]
pos = 0
for c in corpus.chorales.Iterator():
cName = c.filePath.name
if '.krn' in cName:
continue
if '.xml' in cName:
cName = cName.replace('.xml', '.mxl')
if cName not in filenames:
print('skipping', cName)
continue
pos += 1
runOne(c, cName)
def runOne(c, cName):
pName = p / cName
newScore = converter.parse(pName)
newScore.metadata.composer = 'J.S. Bach'
allSuffixesByPart = set()
for part in newScore.parts:
priorMeasure = None
priorMeasureWasIncomplete = False
priorMeasureDuration = 0.0
measureNumberShift = 0
partNumSuffix = []
for m in part.getElementsByClass('Measure'):
mn = m.number
ms = m.numberSuffix
mns = m.measureNumberWithSuffix()
ts = m.timeSignature or m.getContextByClass('TimeSignature')
if ts is None:
print("No time signature context!", cName, part.id, mns)
continue
barQl = ts.barDuration.quarterLength
mQl = m.duration.quarterLength
short = barQl - mQl
perfect = True if short == 0 else False
pickup = True if not perfect and short > (barQl / 2) else False
truncated = True if not perfect and short < (barQl / 2) else False
half = True if not perfect and short == (barQl / 2) else False
if half and priorMeasureWasIncomplete==False:
priorMeasureWasIncomplete = True
priorMeasureDuration = mQl
priorMeasure = m
m.number = mn - measureNumberShift
partNumSuffix.append((mn - measureNumberShift, ms))
elif half and priorMeasureWasIncomplete and priorMeasureDuration == short:
priorMeasureWasIncomplete = False
m.paddingLeft = short
priorMeasure = m
priorMeasureDuration = mQl
measureNumberShift += 1
if ms is None:
ms = 'a'
else:
ms = ms + 'a'
m.number = mn - measureNumberShift
m.numberSuffix = ms
partNumSuffix.append((mn - measureNumberShift, ms))
elif perfect:
priorMeasureWasIncomplete = False
priorMeasureDuration = mQl
priorMeasure = m
m.number = mn - measureNumberShift
partNumSuffix.append((mn - measureNumberShift, ms))
elif pickup and priorMeasure is None:
# pickup measure 1
partNumSuffix.append((0, ms))
m.number = 0
measureNumberShift += 1
elif truncated and priorMeasureWasIncomplete and priorMeasureDuration == short:
print("Truncated measure following pickup...", cName, part.id, mn)
priorMeasure.paddingRight = priorMeasure.paddingLeft
priorMeasure.paddingLeft = 0
measureNumberShift += 1
priorMeasure = m
priorMeasureDuration = mQl
if ms is None:
ms = 'x'
else:
ms = ms + 'x'
m.number = mn - measureNumberShift
m.numberSuffix = ms
partNumSuffix.append((mn - measureNumberShift, ms))
elif truncated:
priorMeasureWasIncomplete = True
m.paddingRight = short
priorMeasure = m
priorMeasureDuration = mQl
m.number = mn - measureNumberShift
partNumSuffix.append((mn - measureNumberShift, ms))
elif pickup and not priorMeasureWasIncomplete:
print("Pickup following complete prior measure", cName, part.id, mn)
priorMeasureWasIncomplete = True
m.paddingLeft = short
priorMeasure = m
priorMeasureDuration = mQl
m.number = mn - measureNumberShift
partNumSuffix.append((mn - measureNumberShift, ms))
elif pickup and priorMeasureWasIncomplete and priorMeasureDuration == short:
# good, matched up!
priorMeasureWasIncomplete = True
m.paddingLeft = short
priorMeasure = m
priorMeasureDuration = mQl
measureNumberShift += 1
if ms is None:
ms = 'a'
else:
ms = ms + 'a'
m.number = mn - measureNumberShift
m.numberSuffix = ms
partNumSuffix.append((mn - measureNumberShift, ms))
elif pickup and priorMeasureWasIncomplete and ts is not priorMeasure.timeSignature:
print("Changing TS Pickup", cName, part.id, mn)
priorMeasureWasIncomplete = True
m.paddingLeft = short
priorMeasure = m
priorMeasureDuration = mQl
measureNumberShift += 1
m.number = mn - measureNumberShift
partNumSuffix.append((mn - measureNumberShift, ms))
partSuffixesTuple = tuple(partNumSuffix)
allSuffixesByPart.add(partSuffixesTuple)
if len(allSuffixesByPart) != 1:
print("Multiple conflicting measures!", cName)
print(cName, allSuffixesByPart)
try:
kOrig = c.recurse().getElementsByClass('KeySignature')[0]
kNew = newScore.recurse().getElementsByClass('KeySignature')[0]
sKOrig = str(kOrig)
sKNew = str(kNew)
if kOrig.sharps != kNew.sharps:
print("Key changed from", kOrig, kNew)
if sKNew != sKOrig:
kNew.activeSite.replace(kNew, kOrig)
analysisKey = newScore.analyze('key')
print('Mode would have been changed from ', sKOrig, sKNew)
if str(analysisKey) != sKOrig:
print("Key mismatch: ", sKOrig, sKNew, str(analysisKey))
except IndexError:
print('no key in ', cName)
fNewXml = pOut / (cName.replace('.mxl', '.xml'))
newScore.write(fp=fNewXml)
musicxml.archiveTools.compressXML(str(fNewXml), deleteOriginal=True)
# for i, pOrig in enumerate(c.parts):
# expander = repeat.Expander(pOrig)
# if not expander.isExpandable():
# #print('incoherent repeats', cName)
# try:
# pOrig = expander.process()
# except Exception:
# pass
#
# pNew = newScore.parts[i]
# expander = repeat.Expander(pNew)
# if not expander.isExpandable():
# #print('incoherent repeats', cName)
# try:
# pNew = expander.process()
# except Exception:
# pass
#
# origPitches = tuple([p.nameWithOctave for p in pOrig.pitches])
# newPitches = tuple([p.nameWithOctave for p in pNew.pitches])
# if origPitches != newPitches:
# print(cName, pOrig.id, len(origPitches), len(newPitches))
# pNew.show()
# for i, thisP in enumerate(origPitches):
# try:
# newP = newPitches[i]
# except IndexError:
# continue
# if thisP != newP:
# print(i, thisP, newP)
if __name__ == '__main__':
run()
|
py | b40970cb2067a9d90a23f44b76702a357118d22f | applyPatch('20200413-dldt-disable-unused-targets.patch')
applyPatch('20200413-dldt-fix-binaries-location.patch')
applyPatch('20200413-dldt-pdb.patch')
applyPatch('20200415-ngraph-disable-unused-options.patch')
|
py | b409711cf6fd029aaedc591d35a818ac9414a53f | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import re
import codecs
import inspect
import socket
import functools
import itertools
import warnings
import logging
import traceback
import json
import contextlib
from .compat import (
PY3,
StringIO,
text_type,
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlunsplit,
urlsplit,
parse_qs,
unquote,
unquote_utf8,
ClassTypes,
basestring
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError, UnmockedError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
old_socket = socket.socket
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
if PY3: # pragma: no cover
basestring = (bytes, str)
try: # pragma: no cover
import socks
old_socksocket = socks.socksocket
except ImportError:
socks = None
try: # pragma: no cover
import ssl
old_ssl_wrap_socket = ssl.wrap_socket
if not PY3:
old_sslwrap_simple = ssl.sslwrap_simple
old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
ssl = None
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
"""Represents a HTTP request. It takes a valid multi-line, `\r\n`
separated string with HTTP headers and parse them out using the
internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with StringIO
    instances so that we guarantee that it won't make any I/O, neither
for writing nor reading.
It has some convenience attributes:
`headers` -> a mimetype object that can be cast into a dictionary,
contains all the request headers
`method` -> the HTTP method used in this request
`querystring` -> a dictionary containing lists with the
attributes. Please notice that if you need a single value from a
query string you will need to get it manually like:
```python
>>> request.querystring
{'name': ['Gabriel Falcao']}
>>> print request.querystring['name'][0]
```
`parsed_body` -> a dictionary containing parsed request body or
None if HTTPrettyRequest doesn't know how to parse it. It
currently supports parsing body data that was sent under the
`content-type` headers values: 'application/json' or
'application/x-www-form-urlencoded'
"""
def __init__(self, headers, body=''):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.raw_headers = utf8(headers.strip())
self.body = utf8(body)
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
self.wfile = StringIO() # Creating `wfile` as an empty
# StringIO, just to avoid any real
# I/O calls
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
if not self.parse_request():
return
# making the HTTP method string available as the command
self.method = self.command
# Now 2 convenient attributes for the HTTPretty API:
# `querystring` holds a dictionary with the parsed query string
try:
self.path = self.path.encode('iso-8859-1')
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self.body)
def __str__(self):
return '<HTTPrettyRequest("{0}", total_headers={1}, body_length={2})>'.format(
self.headers.get('content-type', ''),
len(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
def parse_request_body(self, body):
""" Attempt to parse the post based on the content-type passed. Return the regular body if not """
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
FALLBACK_FUNCTION = lambda x: x
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except:
return body
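        # Example (based on the mapping above): with content-type
        # 'application/json' a body of '{"name": "Gabriel"}' parses to
        # {'name': 'Gabriel'}; with 'application/x-www-form-urlencoded',
        # 'name=Gabriel' becomes {'name': ['Gabriel']}; any other content-type
        # falls back to returning the body unchanged.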
class EmptyRequestHeaders(dict):
pass
class HTTPrettyRequestEmpty(object):
body = ''
headers = EmptyRequestHeaders()
class FakeSockFile(StringIO):
def close(self):
self.socket.close()
StringIO.close(self)
class FakeSSLSocket(object):
def __init__(self, sock, *args, **kw):
self._httpretty_sock = sock
def __getattr__(self, attr):
return getattr(self._httpretty_sock, attr)
class fakesock(object):
class socket(object):
_entry = None
debuglevel = 0
_sent_data = []
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
protocol=0):
self.truesock = (old_socket(family, type, protocol)
if httpretty.allow_net_connect
else None)
self._closed = True
self.fd = FakeSockFile()
self.fd.socket = self
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
self._sock = self
self.is_http = False
self._bufsize = 1024
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, level, optname, value):
if self.truesock:
self.truesock.setsockopt(level, optname, value)
def connect(self, address):
self._closed = False
try:
self._address = (self._host, self._port) = address
except ValueError:
# We get here when the address is just a string pointing to a
# unix socket path/file
#
# See issue #206
self.is_http = False
else:
self.is_http = self._port in POTENTIAL_HTTP_PORTS | POTENTIAL_HTTPS_PORTS
if not self.is_http:
if self.truesock:
self.truesock.connect(self._address)
else:
raise UnmockedError()
def close(self):
if not (self.is_http and self._closed):
if self.truesock:
self.truesock.close()
self._closed = True
def makefile(self, mode='r', bufsize=-1):
"""Returns this fake socket's own StringIO buffer.
If there is an entry associated with the socket, the file
descriptor gets filled in with the entry data before being
returned.
"""
self._mode = mode
self._bufsize = bufsize
if self._entry:
self._entry.fill_filekind(self.fd)
return self.fd
def real_sendall(self, data, *args, **kw):
"""Sends data to the remote server. This method is called
when HTTPretty identifies that someone is trying to send
non-http data.
The received bytes are written in this socket's StringIO
buffer so that HTTPretty can return it accordingly when
necessary.
"""
if not self.truesock:
raise UnmockedError()
if not self.is_http:
return self.truesock.sendall(data, *args, **kw)
self.truesock.connect(self._address)
self.truesock.setblocking(1)
self.truesock.sendall(data, *args, **kw)
should_continue = True
while should_continue:
try:
received = self.truesock.recv(self._bufsize)
self.fd.write(received)
should_continue = len(received) == self._bufsize
except socket.error as e:
if e.errno == EAGAIN:
continue
break
self.fd.seek(0)
def sendall(self, data, *args, **kw):
self._sent_data.append(data)
self.fd = FakeSockFile()
self.fd.socket = self
try:
requestline, _ = data.split(b'\r\n', 1)
method, path, version = parse_requestline(
decode_utf8(requestline))
is_parsing_headers = True
except ValueError:
is_parsing_headers = False
if not self._entry:
# If the previous request wasn't mocked, don't mock the
# subsequent sending of data
return self.real_sendall(data, *args, **kw)
self.fd.seek(0)
if not is_parsing_headers:
if len(self._sent_data) > 1:
headers = utf8(last_requestline(self._sent_data))
meta = self._entry.request.headers
body = utf8(self._sent_data[-1])
if meta.get('transfer-encoding', '') == 'chunked':
if not body.isdigit() and body != b'\r\n' and body != b'0\r\n\r\n':
self._entry.request.body += body
else:
self._entry.request.body += body
httpretty.historify_request(headers, body, False)
return
# path might come with
s = urlsplit(path)
POTENTIAL_HTTP_PORTS.add(int(s.port or 80))
headers, body = list(map(utf8, data.split(b'\r\n\r\n', 1)))
request = httpretty.historify_request(headers, body)
info = URIInfo(hostname=self._host, port=self._port,
path=s.path,
query=s.query,
last_request=request)
matcher, entries = httpretty.match_uriinfo(info)
if not entries:
self._entry = None
self.real_sendall(data)
return
self._entry = matcher.get_next_entry(method, info, request)
def debug(self, truesock_func, *a, **kw):
if self.is_http:
frame = inspect.stack()[0][0]
lines = list(map(utf8, traceback.format_stack(frame)))
message = [
"HTTPretty intercepted and unexpected socket method call.",
("Please open an issue at "
"'https://github.com/gabrielfalcao/HTTPretty/issues'"),
"And paste the following traceback:\n",
"".join(decode_utf8(lines)),
]
raise RuntimeError("\n".join(message))
if not self.truesock:
raise UnmockedError()
return getattr(self.truesock, truesock_func)(*a, **kw)
def settimeout(self, new_timeout):
self.timeout = new_timeout
def send(self, *args, **kwargs):
return self.debug('send', *args, **kwargs)
def sendto(self, *args, **kwargs):
return self.debug('sendto', *args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
return self.debug('recvfrom_into', *args, **kwargs)
def recv_into(self, *args, **kwargs):
return self.debug('recv_into', *args, **kwargs)
def recvfrom(self, *args, **kwargs):
return self.debug('recvfrom', *args, **kwargs)
def recv(self, *args, **kwargs):
return self.debug('recv', *args, **kwargs)
def __getattr__(self, name):
if not self.truesock:
raise UnmockedError()
return getattr(self.truesock, name)
def fake_wrap_socket(s, *args, **kw):
return s
def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
s.settimeout(timeout)
if source_address:
s.bind(source_address)
s.connect(address)
return s
def fake_gethostbyname(host):
return '127.0.0.1'
def fake_gethostname():
return 'localhost'
def fake_getaddrinfo(
host, port, family=None, socktype=None, proto=None, flags=None):
return [(2, 1, 6, '', (host, port))]
class Entry(BaseClass):
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, text_type):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
try:
igot = int(got)
except ValueError:
warnings.warn(
'HTTPretty got to register the Content-Length header '
'with "%r" which is not a number' % got,
                )
                continue
if igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header '
'Content-Length you registered expects size "%d" but '
'the body you registered for that has actually length '
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry %s %s getting %d>' % (
self.method, self.uri, self.status)
def normalize_headers(self, headers):
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(self.normalize_headers(self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(
self.request, self.info.full_url(), headers)
if self.request.method != "HEAD":
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length', self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
string_list.append('server: %s' % headers.pop('server'))
for k, v in headers.items():
string_list.append(
'{0}: {1}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
def url_fix(s, charset='utf-8'):
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
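# Illustrative example (comment only, not part of the original module): url_fix()
# percent-encodes spaces in the path while quote_plus() keeps ':', '&' and '=' in the
# querystring, e.g.
#   url_fix('http://example.com/some path/?q=a b')  ->  'http://example.com/some%20path/?q=a+b'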
class URIInfo(BaseClass):
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
self.query = query or ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def __str__(self):
attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
credentials = ""
if self.password:
credentials = "{0}:{1}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{0}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
@classmethod
def from_uri(cls, uri, entry):
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
class URIMatcher(object):
regex = None
info = None
def __init__(self, uri, entries, match_querystring=False):
self._match_querystring = match_querystring
if type(uri).__name__ == 'SRE_Pattern':
self.regex = uri
result = urlsplit(uri.pattern)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
else:
self.info = URIInfo.from_uri(uri, entries)
self.entries = entries
# hash of current_entry pointers, per method.
self.current_entries = {}
def matches(self, info):
if self.info:
return self.info == info
else:
return self.regex.search(info.full_url(
use_querystring=self._match_querystring))
def __str__(self):
wrap = 'URLMatcher({0})'
if self.info:
return wrap.format(text_type(self.info))
else:
return wrap.format(self.regex.pattern)
def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
# restrict selection to entries that match the requested method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
entry.info = info
entry.request = request
return entry
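    # Illustrative behaviour of get_next_entry() (comment only): if three entries are
    # registered for GET, the first three GET requests receive them in order and every
    # later GET request keeps receiving the third (last) entry.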
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
return text_type(self) == text_type(other)
class httpretty(HttpBaseClass):
"""The URI registration class"""
_entries = {}
latest_requests = []
last_request = HTTPrettyRequestEmpty()
_is_enabled = False
allow_net_connect = True
@classmethod
def match_uriinfo(cls, info):
for matcher, value in cls._entries.items():
if matcher.matches(info):
return (matcher, info)
return (None, [])
@classmethod
@contextlib.contextmanager
def record(cls, filename, indentation=4, encoding='utf-8'):
try:
import urllib3
except ImportError:
raise RuntimeError(
'HTTPretty requires urllib3 installed for recording actual requests.')
http = urllib3.PoolManager()
cls.enable()
calls = []
def record_request(request, uri, headers):
cls.disable()
response = http.request(request.method, uri)
calls.append({
'request': {
'uri': uri,
'method': request.method,
'headers': dict(request.headers),
'body': decode_utf8(request.body),
'querystring': request.querystring
},
'response': {
'status': response.status,
'body': decode_utf8(response.data),
'headers': dict(response.headers)
}
})
cls.enable()
return response.status, response.headers, response.data
for method in cls.METHODS:
cls.register_uri(method, re.compile(
r'.*', re.M), body=record_request)
yield
cls.disable()
with codecs.open(filename, 'w', encoding) as f:
f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, origin):
cls.enable()
data = json.loads(open(origin).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
cls.register_uri(method, uri, body=item['response'][
'body'], forcing_headers=item['response']['headers'])
yield
cls.disable()
@classmethod
def reset(cls):
POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
cls._entries.clear()
cls.latest_requests = []
cls.last_request = HTTPrettyRequestEmpty()
@classmethod
def historify_request(cls, headers, body='', append=True):
request = HTTPrettyRequest(headers, body)
cls.last_request = request
if append or not cls.latest_requests:
cls.latest_requests.append(request)
else:
cls.latest_requests[-1] = request
return request
@classmethod
def register_uri(cls, method, uri, body='HTTPretty :)',
adding_headers=None,
forcing_headers=None,
status=200,
responses=None, match_querystring=False,
**headers):
uri_is_string = isinstance(uri, basestring)
if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri):
uri += '/'
if isinstance(responses, list) and len(responses) > 0:
for response in responses:
response.uri = uri
response.method = method
entries_for_this_uri = responses
else:
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = status
entries_for_this_uri = [
cls.Response(method=method, uri=uri, **headers),
]
matcher = URIMatcher(uri, entries_for_this_uri,
match_querystring)
if matcher in cls._entries:
matcher.entries.extend(cls._entries[matcher])
del cls._entries[matcher]
cls._entries[matcher] = entries_for_this_uri
def __str__(self):
return '<HTTPretty with %d URI entries>' % len(self._entries)
@classmethod
def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None,
status=200, streaming=False, **headers):
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = int(status)
headers[str('streaming')] = streaming
return Entry(method, uri, **headers)
@classmethod
def disable(cls):
cls._is_enabled = False
socket.socket = old_socket
socket.SocketType = old_socket
socket._socketobject = old_socket
socket.create_connection = old_create_connection
socket.gethostname = old_gethostname
socket.gethostbyname = old_gethostbyname
socket.getaddrinfo = old_getaddrinfo
socket.__dict__['socket'] = old_socket
socket.__dict__['_socketobject'] = old_socket
socket.__dict__['SocketType'] = old_socket
socket.__dict__['create_connection'] = old_create_connection
socket.__dict__['gethostname'] = old_gethostname
socket.__dict__['gethostbyname'] = old_gethostbyname
socket.__dict__['getaddrinfo'] = old_getaddrinfo
if socks:
socks.socksocket = old_socksocket
socks.__dict__['socksocket'] = old_socksocket
if ssl:
ssl.wrap_socket = old_ssl_wrap_socket
ssl.SSLSocket = old_sslsocket
ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
ssl.__dict__['SSLSocket'] = old_sslsocket
if not PY3:
ssl.sslwrap_simple = old_sslwrap_simple
ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple
@classmethod
def is_enabled(cls):
return cls._is_enabled
@classmethod
def enable(cls):
cls._is_enabled = True
# Some versions of python internally shadowed the
# SocketType variable incorrectly https://bugs.python.org/issue20386
bad_socket_shadow = (socket.socket != socket.SocketType)
socket.socket = fakesock.socket
socket._socketobject = fakesock.socket
if not bad_socket_shadow:
socket.SocketType = fakesock.socket
socket.create_connection = create_fake_connection
socket.gethostname = fake_gethostname
socket.gethostbyname = fake_gethostbyname
socket.getaddrinfo = fake_getaddrinfo
socket.__dict__['socket'] = fakesock.socket
socket.__dict__['_socketobject'] = fakesock.socket
if not bad_socket_shadow:
socket.__dict__['SocketType'] = fakesock.socket
socket.__dict__['create_connection'] = create_fake_connection
socket.__dict__['gethostname'] = fake_gethostname
socket.__dict__['gethostbyname'] = fake_gethostbyname
socket.__dict__['getaddrinfo'] = fake_getaddrinfo
if socks:
socks.socksocket = fakesock.socket
socks.__dict__['socksocket'] = fakesock.socket
if ssl:
ssl.wrap_socket = fake_wrap_socket
ssl.SSLSocket = FakeSSLSocket
ssl.__dict__['wrap_socket'] = fake_wrap_socket
ssl.__dict__['SSLSocket'] = FakeSSLSocket
if not PY3:
ssl.sslwrap_simple = fake_wrap_socket
ssl.__dict__['sslwrap_simple'] = fake_wrap_socket
def httprettified(test):
"A decorator tests that use HTTPretty"
def decorate_class(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
httpretty.reset()
httpretty.enable()
try:
return test(*args, **kw)
finally:
httpretty.disable()
return wrapper
if isinstance(test, ClassTypes):
return decorate_class(test)
return decorate_callable(test)
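# Minimal usage sketch (comment only; relies on the names defined above and on
# httpretty.GET being provided by HttpBaseClass):
#
#   @httprettified
#   def test_homepage():
#       httpretty.register_uri(httpretty.GET, 'http://example.com/',
#                              body='hello world', content_type='text/plain')
#       # any HTTP call made by the code under test now receives 'hello world'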
|
py | b409715aab299eac225adaa56ada8c708cc60f41 | import math
def polysum(n,s):
def areaOfPolygon(n,s):
area = (0.25 * n * s ** 2)/math.tan(math.pi/n)
return area
def perimeterOfPolygon(n,s):
perimeter = n * s
return perimeter
    sum = areaOfPolygon(n,s) + (perimeterOfPolygon(n,s) ** 2)
return round(sum,4)
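# Sanity check (illustrative): for a square with side 2 the area term is
# 0.25 * 4 * 2**2 / tan(pi/4) = 4.0 and the perimeter is 8, so
# polysum(4, 2) == round(4.0 + 8 ** 2, 4) == 68.0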
|
py | b4097191a525575be8b428d515b95440d8f5391a | def d(n):
sum = 1
for i in range(2, n):
if n / i == n // i:
sum += i
return sum
def isAmicable(a):
b = d(a)
if a != b and d(b) == a:
return True
return False
sum = 0
for i in range(219, 10000):
if isAmicable(i):
print(i)
sum += i
print(sum)
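# d(n) returns the sum of the proper divisors of n; a pair (a, b) is amicable when
# d(a) == b, d(b) == a and a != b. Classic example: d(220) == 284 and d(284) == 220,
# so both 220 and 284 are printed and added to the running total above.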
|
py | b409722893762808e26bb3c84d38d5848f305bf4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Rigol DG1022 Function Generator Interface
This application will allow the user to control the Rigol DG1022
Function generator through the Qt interface or through the command
line interface. The benefit of this is the built-in ability to control
the frequency and voltage of two channels in a linked format.
author: [email protected]
last edited: November 2014
"""
import sys
from PyQt4.QtGui import *
class RigolDG():
def __init__(self):
pass
class RigolGui(QWidget):
def __init__(self):
super(RigolGui,self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(300,300,250,150)
self.setWindowTitle("Rigol DG1022DG Interface")
self.setWindowIcon(QIcon('./Wabtec_icon.ico'))
self.show()
def main():
app = QApplication(sys.argv)
fg = RigolGui()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
py | b40973afed0a891ea1cbb5867c9cbcbf3ecbe0a7 | # ===========================================
# log_functions.py
#
# Log functions used to show/write log on display/file
#
# Written by Sajjad Ziyaei amiri (04/12/2016)
# ===========================================
import time
import platform
class uvp_log(object):
def __init__(self):
self.write_to_file = True
self.show_on_screen = False
self.linux_log_path = "/var/log/"
self.windows_log_path = ""
self.log_filename = "uvp.log"
self.error_log_filename = "uvp-error.log"
self.tag = "UVP"
if "Windows" in platform.system() :
self.logpath = self.windows_log_path + self.log_filename
self.errlogpath = self.windows_log_path + self.error_log_filename
elif "Linux" in platform.system():
self.logpath = self.linux_log_path + self.log_filename
self.errlogpath = self.linux_log_path + self.error_log_filename
# ======================================
def set_write_to_file (self,a):
self.write_to_file = a
def set_show_on_screen (self,a):
self.show_on_screen = a
def set_tag (self,a):
self.tag = a
def set_log_filename (self,a):
self.log_filename = a
if "Windows" in platform.system() :
self.logpath = self.windows_log_path + self.log_filename
elif "Linux" in platform.system():
self.logpath = self.linux_log_path + self.log_filename
def set_error_log_filename (self,a):
self.error_log_filename = a
if "Windows" in platform.system() :
self.errlogpath = self.windows_log_path + self.error_log_filename
elif "Linux" in platform.system():
self.errlogpath = self.linux_log_path + self.error_log_filename
# ======================================
def info (self,text):
#print "UVP >> ", text
log = time.strftime("%d/%m/%Y %H:%M:%S")+" >> "+self.tag+" >> INFO >> "+ text
if self.write_to_file:
self._write_log_to_file (log)
if self.show_on_screen:
print log
def warning(self,text):
#print "UVP >> **** WARNING **** " + text
log = time.strftime("%d/%m/%Y %H:%M:%S")+" >> "+self.tag+" >> WARNING >> "+ text
if self.write_to_file:
self._write_log_to_file (log)
self._write_error_to_file (log)
if self.show_on_screen:
print log
def error(self, text):
#print "UVP >> **** ERROR **** " + text
log = time.strftime("%d/%m/%Y %H:%M:%S")+" >> "+self.tag+" >> ERROR >> "+ text
if self.write_to_file:
self._write_log_to_file (log)
self._write_error_to_file (log)
if self.show_on_screen:
print log
# ======================================
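    # Example usage (illustrative; writing to /var/log/uvp.log usually needs
    # elevated permissions on Linux):
    #   logger = uvp_log()
    #   logger.set_show_on_screen(True)
    #   logger.set_tag("DEMO")
    #   logger.info("service started")
    #   logger.error("something went wrong")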
def _write_log_to_file(self,text):
f = open(self.logpath,'a')
f.write(text+"\n")
f.close()
def _write_error_to_file(self,text):
f = open(self.errlogpath,'a')
f.write(text+"\n")
f.close() |
py | b40973e80cac5139847f44705c667dc675a894c6 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import os
import lyricsgenius
from pylast import User
from userbot import CMD_HELP, GENIUS, LASTFM_USERNAME, lastfm
from userbot.events import register
if GENIUS is not None:
genius = lyricsgenius.Genius(GENIUS)
@register(outgoing=True, pattern="^.lyrics (?:(now)|(.*) - (.*))")
async def lyrics(lyric):
await lyric.edit("`Getting information...`")
if GENIUS is None:
await lyric.edit("`Provide genius access token to Heroku ConfigVars...`")
return False
if lyric.pattern_match.group(1) == "now":
playing = User(LASTFM_USERNAME, lastfm).get_now_playing()
if playing is None:
await lyric.edit("`No information current lastfm scrobbling...`")
return False
artist = playing.get_artist()
song = playing.get_title()
else:
artist = lyric.pattern_match.group(2)
song = lyric.pattern_match.group(3)
await lyric.edit(f"`Searching lyrics for {artist} - {song}...`")
songs = genius.search_song(song, artist)
if songs is None:
await lyric.edit(f"`Song` **{artist} - {song}** `not found...`")
return False
if len(songs.lyrics) > 4096:
await lyric.edit("`Lyrics is too big, view the file to see it.`")
with open("lyrics.txt", "w+") as f:
f.write(f"Search query: \n{artist} - {song}\n\n{songs.lyrics}")
await lyric.client.send_file(
lyric.chat_id,
"lyrics.txt",
reply_to=lyric.id,
)
os.remove("lyrics.txt")
else:
await lyric.edit(
f"**Search query**:\n`{artist}` - `{song}`" f"\n\n```{songs.lyrics}```"
)
return True
CMD_HELP.update(
{
"lyrics": "`.lyrics` **<artist name> - <song name>**"
"\nUsage: Get lyrics matched artist and song."
"\n\n`.lyrics now`"
"\nUsage: Get lyrics artist and song from current lastfm scrobbling."
}
)
|
py | b40973e8c8f229fc1c1e4090cfb7628e0ed86332 | """Utility for creating a GIF.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def build_gif(imgs, interval=0.1, dpi=72,
save_gif=True, saveto='animation.gif',
show_gif=False, cmap=None):
"""Take an array or list of images and create a GIF.
Parameters
----------
imgs : np.ndarray or list
List of images to create a GIF of
interval : float, optional
Spacing in seconds between successive images.
dpi : int, optional
Dots per inch.
save_gif : bool, optional
Whether or not to save the GIF.
saveto : str, optional
Filename of GIF to save.
show_gif : bool, optional
Whether or not to render the GIF using plt.
cmap : None, optional
Optional colormap to apply to the images.
Returns
-------
ani : matplotlib.animation.ArtistAnimation
The artist animation from matplotlib. Likely not useful.
"""
imgs = np.asarray(imgs)
h, w, *c = imgs[0].shape
fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
fig.subplots_adjust(bottom=0)
fig.subplots_adjust(top=1)
fig.subplots_adjust(right=1)
fig.subplots_adjust(left=0)
ax.set_axis_off()
if cmap is not None:
axs = list(map(lambda x: [
ax.imshow(x, cmap=cmap)], imgs))
else:
axs = list(map(lambda x: [
ax.imshow(x)], imgs))
ani = animation.ArtistAnimation(
fig, axs, interval=interval*1000, repeat_delay=0, blit=True)
if save_gif:
ani.save(saveto, writer='imagemagick', dpi=dpi)
if show_gif:
plt.show()
return ani
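# Minimal usage sketch (illustrative; assumes a list of equally sized numpy arrays
# and that matplotlib can find the 'imagemagick' writer when save_gif is True):
#
#   frames = [np.random.rand(64, 64) for _ in range(10)]
#   build_gif(frames, interval=0.2, cmap='gray', saveto='noise.gif')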
|
py | b409743210929f01b0ea21c9e354c10c9f217a88 | import datetime
from bs4 import BeautifulSoup
from .source import Source
from ...models import Chapter, Metadata, Novel, Volume
class ReadLightNovelsNet(Source):
name = "Read Light Novels"
base_urls = ("https://readlightnovels.net",)
last_updated = datetime.date(2021, 9, 6)
def novel(self, url: str) -> Novel:
soup = self.get_soup(url)
authors = [a.text.strip() for a in soup.select('.info a[href*="novel-author"]')]
if len(authors) == 2:
author = f"{authors[0]} ({authors[1]})"
else:
author = ", ".join(authors)
title = soup.select_one(".title").text.strip()
if title.endswith(" Novel"):
title = title[: -len(" Novel")]
novel = Novel(
title=title,
author=author,
synopsis=[p.text.strip() for p in soup.select(".desc-text > p")],
thumbnail_url=soup.select_one(".info-holder img")["src"],
url=url,
)
for a in soup.select('a[rel*="tag"]'):
novel.metadata.append(Metadata("subject", a.text.strip()))
pages = soup.select("#pagination > ul > li:not(.dropup) a:last-child")
pages_count = int(pages[-1]["title"]) if pages else 0
novel_id = soup.select_one("#id_post")["value"]
volume = novel.get_default_volume()
for page_index in range(pages_count + 1):
self.chapter_page(volume, novel_id, page_index + 1)
return novel
def chapter_page(self, volume: Volume, novel_id, page):
response = self.http_gateway.post(
"https://readlightnovels.net/wp-admin/admin-ajax.php",
data={
"action": "tw_ajax",
"type": "pagination",
"id": novel_id,
"page": page,
},
)
soup = BeautifulSoup(response.json()["list_chap"], "lxml")
for a in soup.select("ul.list-chapter li a"):
chapter = Chapter(
index=len(volume.chapters),
title=a.text.strip(),
url=a["href"],
)
volume.chapters.append(chapter)
def chapter(self, chapter: Chapter):
soup = self.get_soup(chapter.url)
content = soup.select_one(".chapter-content")
self.clean_contents(content)
for br in content.select("br"):
br.extract()
chapter.paragraphs = str(content)
|
py | b409749e13fdfba4d8ca06447992f446c6639e36 | """Basic Eagle Eye API Module"""
import logging
import requests
from requests import HTTPError
from carson_living.error import (CarsonError,
CarsonAPIError)
from carson_living.eagleeye_entities import EagleEyeCamera
from carson_living.util import update_dictionary
from carson_living.const import (BASE_HEADERS,
EEN_API_URI,
EEN_DEVICE_LIST_ENDPOINT,
EEN_IS_AUTH_ENDPOINT)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=useless-object-inheritance
class EagleEye(object):
"""Eagle Eye API class for interfacing with the endpoints
This class should probably be moved in a dedicated Eagle Eye project,
    but initially it can live within Carson Living. Note that the Eagle Eye
    API does not update its state during initialization; it is updated
    externally. A Carson Living update automatically triggers an update call
to Eagle Eye.
"""
def __init__(self, session_callback):
self._session_callback = session_callback
self._session_auth_key = None
self._session_brand_subdomain = None
self._cameras = {}
@property
def session_auth_key(self):
"""Current Auth Key"""
return self._session_auth_key
@property
def session_brand_subdomain(self):
"""Current Brand Subdomain"""
return self._session_brand_subdomain
@property
def cameras(self):
"""Get all cameras returned directly by the API"""
return self._cameras.values()
def get_camera(self, ee_id):
"""
Args:
ee_id: Eagle Eye camera id
Returns:
The EagleEye Camera with id or None, if not found.
"""
return self._cameras.get(ee_id)
def update_session_auth_key(self):
"""Updates the internal session state via session_callback
Raises:
CarsonError: If callback returns empty value.
"""
_LOGGER.debug(
'Trying to update the session auth key for the Eagle Eye API.')
auth_key, brand_subdomain = self._session_callback()
if not auth_key or not brand_subdomain:
raise CarsonError(
'Eagle Eye authentication callback returned empty values.')
self._session_auth_key = auth_key
self._session_brand_subdomain = brand_subdomain
def check_auth(self, refresh=True):
"""Check if the current auth_key is still valid
Args:
refresh:
automatically update auth_key if not valid
Returns: True if a valid auth_key exists.
"""
if not refresh and not self._session_auth_key:
return False
retry_auth = 1 if refresh else 0
try:
self.authenticated_query(
EEN_API_URI + EEN_IS_AUTH_ENDPOINT,
retry_auth=retry_auth
)
except CarsonAPIError:
return False
return True
def authenticated_query(self, url, method='get', params=None,
json=None, retry_auth=1, stream=None,
response_handler=lambda r: r.json()):
"""Perform an authenticated Query against Eagle Eye
Args:
url:
the url to query, can contain a branded subdomain
to substitute
method: the http method to use
params: the http params to use
json: the json payload to submit
retry_auth: number of query and reauthentication retries
stream: Stream the content
response_handler: optional file handler to stream the raw content
Returns:
The json response object, or the file handler that was passed
to receive the raw response.
Raises:
CarsonAPIError: Response indicated an client or
server-side API error.
"""
if not self._session_auth_key \
or not self._session_brand_subdomain:
self.update_session_auth_key()
headers = {'Cookie': 'auth_key={}'.format(self._session_auth_key)}
headers.update(BASE_HEADERS)
response = requests.request(method,
url.format(self._session_brand_subdomain),
headers=headers,
params=params,
json=json,
stream=stream)
# special case, clear token and retry. (Recursion)
if response.status_code == 401 and retry_auth > 0:
_LOGGER.info(
'Eagle Eye request %s returned 401, retrying ... (%d left)',
url, retry_auth)
self._session_auth_key = None
return self.authenticated_query(
url, method, params, json, retry_auth - 1,
stream, response_handler)
try:
response.raise_for_status()
return response_handler(response)
except HTTPError as error:
raise CarsonAPIError(error)
def update(self):
"""Update internal state
Update entity list and individual entity parameters associated with the
Eagle Eye API
"""
_LOGGER.debug('Updating Eagle Eye API and associated entities')
self._update_cameras()
def _update_cameras(self):
# Query List
device_list = self.authenticated_query(
EEN_API_URI + EEN_DEVICE_LIST_ENDPOINT
)
update_cameras = {
c[1]: EagleEyeCamera.map_list_to_entity_payload(c)
for c in device_list if c[3] == 'camera'
}
update_dictionary(
self._cameras,
update_cameras,
lambda c: EagleEyeCamera(self, c))
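# Minimal usage sketch (illustrative): the session callback must return an
# (auth_key, brand_subdomain) tuple, normally supplied by the Carson Living API.
#
#   def session_callback():
#       return ('my-auth-key', 'my-brand')  # hypothetical credentials
#
#   een = EagleEye(session_callback)
#   een.update()                 # refreshes the camera list
#   for camera in een.cameras:   # EagleEyeCamera entities
#       print(camera)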
|
py | b40974cb6f4b9e4bfb93127f9c9463fe124e0af7 | #!/usr/bin/env python
# encoding=utf8
# FraternityX
import re, os, platform, cgi, datetime, pprint, sys
import urllib
import urlparse
AGENT_NAME = 'FraternityX'
AGENT_VERSION = '2021.08.25.0'
AGENT_LANGUAGES = [Locale.Language.NoLanguage, Locale.Language.English]
AGENT_FALLBACK_AGENT = False
AGENT_PRIMARY_PROVIDER = False
AGENT_CONTRIBUTES_TO = ['com.plexapp.agents.cockporn']
AGENT_CACHE_TIME = CACHE_1HOUR * 24
AGENT_MATCH_VIDEO_NAME = False
META_ID_SEPARATOR = "|||-|||"
LOG_BIGLINE = '------------------------------------------------------------------------------'
LOG_SUBLINE = '---------------------'
LOG_STARLINE ='******************************************************************************'
def Start():
Log.Info(LOG_BIGLINE)
Log.Info('[' + AGENT_NAME + '] ' + 'Starting Metadata Agent ' + AGENT_VERSION)
HTTP.CacheTime = 0
HTTP.Headers['Cookie'] = 'pp-accepted=true' #Bypasses the age verification screen
HTTP.Headers['User-agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'
def ValidatePrefs():
Log.Info('[' + AGENT_NAME + '] ' + 'Validating Preferences')
Log.Debug('[' + AGENT_NAME + '] ' + 'Folder(s) where these items might be found: ' + str(Prefs['folders']))
Log.Debug('[' + AGENT_NAME + '] ' + 'Regular expression - ' + str(Prefs['regex']))
Log.Debug('[' + AGENT_NAME + '] ' + 'Cover Images to download - ' + str(Prefs['cover']))
Log.Debug('[' + AGENT_NAME + '] ' + 'Ouput debugging info in logs - ' + str(Prefs['debug']))
Log.Info('[' + AGENT_NAME + '] ' + 'Validation Complete')
def log(state, message, *args):
if state == 'info':
Log.Info('[' + AGENT_NAME + '] ' + ' - ' + message, *args)
elif state == 'error':
Log.Error('[' + AGENT_NAME + '] ' + ' - ' + message, *args)
elif Prefs['debug'] and state == 'debug':
Log.Debug('[' + AGENT_NAME + '] ' + ' - ' + message, *args)
import utils
from studioVars import SCRAPER_PATHS, STUDIO_MAP, URL, REQUEST_DELAY
class FraternityX(Agent.Movies):
name = AGENT_NAME
languages = AGENT_LANGUAGES
media_types = ['Movie']
primary_provider = AGENT_PRIMARY_PROVIDER
fallback_agent = AGENT_FALLBACK_AGENT
contributes_to = AGENT_CONTRIBUTES_TO
def search(self, results, media, lang):
log('info', LOG_BIGLINE)
log('info', '%s> search::init:%s', LOG_SUBLINE, media.title)
log('info', LOG_BIGLINE)
log('debug', 'search::%s | Platform: %s %s', media.title, platform.system(), platform.release())
log('debug', 'search::%s | results - %s', media.title, results)
log('debug', 'search::%s | media.items[0].parts[0].file - %s', media.title, media.items[0].parts[0].file)
log('debug', 'search::%s | media.filename - %s', media.title, media.filename)
log('debug', 'search::%s | %s', media.title, results)
if not media.items[0].parts[0].file:
return
path_and_file = media.items[0].parts[0].file
log('debug', 'search::%s | Filepath - %s', media.title, path_and_file)
path_and_file = os.path.splitext(path_and_file)[0]
enclosing_directory, file_name = os.path.split(os.path.splitext(path_and_file)[0])
enclosing_directory, enclosing_folder = os.path.split(enclosing_directory)
log('debug', 'search::%s | Enclosing Folder - %s', media.title, enclosing_folder)
log('debug', 'search::%s | Enclosing Directory - %s', media.title, enclosing_directory)
log('debug', 'search::%s | File Name - %s', media.title, file_name)
if Prefs['folders'] != "*":
folder_list = re.split(',\s*', Prefs['folders'])
file_folders = utils.splitall(path_and_file)
log('debug', 'search::%s | Looking for folder matched - Folders enabled: [%s] ', media.title, ','.join(folder_list))
            log('debug', 'search::%s | Item folders - [%s] ', media.title, ','.join(file_folders))
folder_matched = False
for folder in file_folders:
if folder in folder_list:
folder_matched = True
log('info', 'search::%s | Folder matched - %s', media.title, folder)
if folder_matched == False:
log('info', 'search::%s | No folder match found - Skipping media', media.title)
log('debug', LOG_BIGLINE)
return
# File names to match for this agent
log('debug', 'search::%s | Regular expression: %s', media.title, str(Prefs['regex']))
try:
file_name_pattern = re.compile(Prefs['regex'], re.IGNORECASE)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'search::%s | Error with regex - %s | %s', media.title, path_and_file, e)
log('error', LOG_STARLINE)
return
m = file_name_pattern.search(file_name)
if not m:
log('debug', 'search::%s | File %s not in expected format - Skipping...', media.title, file_name)
log('debug', LOG_BIGLINE)
return
groups = m.groupdict()
clip_number = file_studio = clip_name = None
file_studio = groups['studio']
if 'file_studio' in groups:
file_studio = groups['file_studio']
if 'clip_number' in groups:
clip_number = groups['clip_number']
clip_name = groups['clip_name']
log('debug', 'search::%s | Studio - %s', media.title, file_studio)
log('debug', 'search::%s | Clip Number - %s', media.title, clip_number)
log('debug', 'search::%s | Clip Name - %s', media.title, clip_name)
if file_studio is not None and AGENT_MATCH_VIDEO_NAME==True and file_studio.lower() != AGENT_NAME.lower():
log('debug', 'search::%s | Skipping %s because does not match: %s', media.title, file_name, AGENT_NAME)
return
if clip_number is not None:
url = URL["Video"] % clip_number
title = self.fetch_title_search(url, media.title)
log('info', 'search::%s | Clipnumber match [%s]', media.title, clip_number)
log('info', 'search::%s | Clip name [%s]', media.title, clip_name)
log('info', 'search::%s | URL [%s]', media.title, url)
log('info', LOG_BIGLINE)
results.Append(MetadataSearchResult(id = url, name = title, score = 98, lang = lang))
return
search_query_raw = list()
for piece in clip_name.split(' '):
search_query_raw.append(cgi.escape(piece))
search_query="+".join(search_query_raw)
log('debug', 'search::%s | Search query - %s', media.title, search_query)
htmlElement=HTML.ElementFromURL(URL["Search"] % search_query, sleep=REQUEST_DELAY)
search_results=htmlElement.xpath(SCRAPER_PATHS['Search'])
log('debug', 'search::%s | Browsing results - %s', media.title, SCRAPER_PATHS['Search'])
search_results_videos = []
log('debug', 'search::%s | (%s) movies found', media.title, len(search_results))
if len(search_results) > 0:
for result in search_results:
video_title = result.xpath(SCRAPER_PATHS['SearchVideoTitle'])[0].strip()
log('debug', 'search::%s | Search results for "%s": - "%s"', media.title, clip_name, video_title)
video_title = video_title.replace(AGENT_NAME, "").replace(AGENT_NAME.replace(" ",""),"")
log('debug', 'search::%s | Trimmed result "%s"', media.title, video_title)
url = result.xpath(SCRAPER_PATHS['SearchVideoUrl'])[0]
if url.startswith("/url?"):
log('debug', 'search::%s | Parsing URL"', media.title)
video_url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
log('debug', 'search::%s | Parsed URL %s', media.title, video_url)
elif url.startswith("http"):
video_url = url
else:
video_url = URL["Base"] % url
log('debug', 'search::%s | Video url - %s', media.title, video_url)
video_title = self.fetch_title_search(video_url, media.title)
if video_title.strip().lower() == clip_name.strip().lower():
log('info', 'search::%s | MATCH: TITLE [%s]', media.title, clip_name)
log('info', 'search::%s | Title [%s]', media.title, video_title)
log('info', 'search::%s | URL [%s]', media.title, video_url)
results.Append(MetadataSearchResult(id = video_url, name = video_title, score = 90, lang = lang))
log('info', LOG_BIGLINE)
return
else:
search_results_videos.append({"title": video_title, "url": video_url})
log('info', 'search::%s | Match: CLOSEST [%s]', media.title, clip_name)
log('info', 'search::%s | Title [%s]', media.title, video_title)
log('info', 'search::%s | URL [%s]', media.title, video_url)
            log('info', 'search::%s | Returning closest match for "%s" - [site\'s title: "%s", url: "%s"]', media.title, clip_name, search_results_videos[0]['title'], search_results_videos[0]['url'])
results.Append(MetadataSearchResult(id = search_results_videos[0]['url'], name = search_results_videos[0]['title'], score = 80, lang = lang))
log('info', LOG_BIGLINE)
return
else:
log('info', 'search::%s | No results for clip: %s', media.title, clip_name)
log('info', LOG_BIGLINE)
return
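    # Note on Prefs['regex'] (illustrative example, not the shipped default): the
    # pattern must expose the named groups read above, e.g.
    #   (?P<studio>.+?) - (?P<clip_name>.+)
    # optionally with (?P<file_studio>...) and (?P<clip_number>...) groups when the
    # file name carries a studio override or a site clip number.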
def fetch_title_search(self, url, id):
log('debug', 'fetch_title_search::init::%s', id)
htmlElement=HTML.ElementFromURL(url, sleep=REQUEST_DELAY)
name=self.fetch_title(htmlElement, id)
log('debug', 'fetch_title_search::%s | Video Title for search : %s', id, name)
return name
def fetch_title(self, html, id):
if not SCRAPER_PATHS['VideoTitle']:
return
        log('debug', 'fetch_title::%s init', id)
title = [0, 1]
log('debug', 'fetch_title::%s | Video_title search: "%s"', id, SCRAPER_PATHS['VideoTitle'])
xpathTitle=html.xpath(SCRAPER_PATHS['VideoTitle'])
if len(xpathTitle) > 0:
title[0] = xpathTitle[0]
log('debug', 'fetch_title::%s | Video_title found: "%s"', id, title[0])
else:
title[0] = "TITLE_NOT_FOUND"
log('debug', 'fetch_title::%s | No title found', id)
return title[0]
def fetch_title_meta(self, html, metadata):
metadata.title = self.fetch_title(html, metadata.id)
def fetch_date(self, html, metadata):
if not SCRAPER_PATHS['ReleaseDate']:
return
log('debug', 'fetch_date::init::%s', metadata.id)
xpath = html.xpath(SCRAPER_PATHS['ReleaseDate'])
        log('debug', 'fetch_date::%s | XPATH result - %s', metadata.id, xpath)
if xpath:
if isinstance(xpath, list):
release_date=xpath[0].strip()
else:
release_date=xpath.strip()
if (release_date):
release_date = html.xpath(SCRAPER_PATHS['ReleaseDate'])[0].replace("Published on","").strip()
log('debug', 'fetch_date::%s | %s', metadata.id, release_date)
date_original = Datetime.ParseDate(release_date).date()
metadata.originally_available_at = date_original
metadata.year = metadata.originally_available_at.year
else:
            log('debug', 'fetch_date::%s | No Date to fetch for this studio', metadata.id)
def fetch_summary(self, html, metadata):
if not SCRAPER_PATHS['VideoSummary']:
return
log('debug', 'fetch_summary::init::%s', metadata.id)
try:
xpath = html.xpath(SCRAPER_PATHS['VideoSummary'])
if isinstance(xpath, list):
video_summary=xpath[0].strip()
else:
video_summary=xpath.strip()
log('debug', 'fetch_summary::%s | Fetched summary %s', metadata.id, video_summary)
metadata.summary = video_summary
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'Error in fetch_summary::%s || %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
def fetch_cast(self, html, metadata):
if not SCRAPER_PATHS['CastMembers']:
return
log('debug', 'fetch_cast::init::%s', metadata.id)
try:
video_cast=html.xpath(SCRAPER_PATHS['CastMembers'])
log('debug', 'fetch_cast::%s | %s Cast members found', metadata.id, len(video_cast))
metadata.roles.clear()
for cast in video_cast:
log('debug', 'fetch_cast::%s | xpath result name %s', metadata.id, cast.xpath(SCRAPER_PATHS['CastName']))
cname = cast.xpath(SCRAPER_PATHS['CastName'])[0].strip(', ')
log('debug', 'fetch_cast::%s | Cast Member %s', metadata.id, cname)
log('debug', 'fetch_cast::%s | xpath result url %s', metadata.id, cast.xpath(SCRAPER_PATHS['CastUrl']))
# Extracting cast members photo
castUrlPath = cast.xpath(SCRAPER_PATHS['CastUrl'])
if len(castUrlPath) > 0:
castUrl = castUrlPath[0].strip()
castUrl = castUrl = URL["Base"] % castUrl if castUrl.startswith("/") else castUrl
log('debug', 'fetch_cast::%s | Cash Url %s', metadata.id, castUrl)
castHtml = HTML.ElementFromURL(castUrl, sleep=REQUEST_DELAY)
castPhotos = castHtml.xpath(SCRAPER_PATHS['CastPhoto'])
log('debug', 'fetch_cast::%s | xpath result cast photos %s', metadata.id, castPhotos)
if len(castPhotos) > 0 :
castPhoto = castHtml.xpath(SCRAPER_PATHS['CastPhoto'])[0].strip()
castPhoto = castPhoto = URL["Base"] % castPhoto if castPhoto.startswith("/") else castPhoto
log('debug', 'fetch_cast::%s | Cash Photo %s', metadata.id, castPhoto)
if (len(cname) > 0):
role = metadata.roles.new()
role.name = cname
role.photo = castPhoto
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'Error in fetch_cast::%s || %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
def fetch_genres(self, html, metadata):
if not SCRAPER_PATHS['Genres']:
return
log('debug', 'fetch_genres::init::%s', metadata.id)
metadata.genres.clear()
log('debug', 'fetch_genres::%s | xpath %s', metadata.id, SCRAPER_PATHS['Genres'])
genres=html.xpath(SCRAPER_PATHS['Genres'])
        log('debug', 'fetch_genres::%s | Genres extracted %s', metadata.id, genres)
for genre in genres:
genre = genre.strip()
if (len(genre) > 0):
metadata.genres.add(genre)
def fetch_studio(self, html, metadata):
log('debug', 'fetch_studio::init::%s', metadata.id)
metadata.studio = AGENT_NAME
if SCRAPER_PATHS['Studio'].strip():
log('debug', 'fetch_studio::%s | xpath %s', metadata.id, SCRAPER_PATHS['Studio'])
xpath_result = html.xpath(SCRAPER_PATHS['Studio'])
if len(xpath_result) > 0:
studio=xpath_result[0].strip()
metadata.studio = studio
log('debug', 'fetch_studio::%s | Studio extracted - "%s"', metadata.id, studio)
if STUDIO_MAP is not None:
if studio.lower() in STUDIO_MAP:
metadata.studio = STUDIO_MAP[studio.lower()]
log('debug', 'fetch_studio::%s | Studio %s', metadata.id, metadata.studio)
if not metadata.studio in metadata.collections:
log('debug', 'fetch_studio::%s | Adding to collection %s', metadata.id, metadata.studio)
metadata.collections.add(metadata.studio)
return
def fetch_images(self, html, metadata):
log('debug', 'fetch_images::init::%s', metadata.id)
i = 0
try:
coverPrefs = int(Prefs['cover'])
except ValueError:
# an absurdly high number means "download all the things"
coverPrefs = 10000
imageType = 'Poster & Art'
try:
log('debug', LOG_SUBLINE)
htmlimages = []
posterIndex = -1
if SCRAPER_PATHS['Poster']:
log('debug', 'fetch_images::%s | poster xpath - %s', metadata.id, SCRAPER_PATHS['Poster'])
fetched_posters = html.xpath(SCRAPER_PATHS['Poster'])
if len(fetched_posters) > 0:
log('debug', 'fetch_images::%s | poster found - %s', metadata.id, fetched_posters[0])
htmlimages.append(fetched_posters[0])
posterIndex = 0
if SCRAPER_PATHS['Art']:
log('debug', 'fetch_images::%s | art xpath - %s', metadata.id, SCRAPER_PATHS['Art'])
htmlimages = htmlimages + html.xpath(SCRAPER_PATHS['Art'])
log('debug', 'fetch_images::%s | (%s) images found - %s', metadata.id, len(htmlimages), htmlimages)
if posterIndex == -1:
posterIndex = len(htmlimages) // 2
if posterIndex < len(htmlimages):
posterIndex = posterIndex + 1
log('debug', 'fetch_images::%s | poster index to be used - %s', metadata.id, posterIndex)
log('debug', 'fetch_images::%s | current posters - %s', metadata.id, len(metadata.posters))
log('debug', 'fetch_images::%s | current arts - %s', metadata.id, len(metadata.art))
referrer = URL["AddReferrer"]
for index, image in enumerate(htmlimages):
if image.startswith("/") :
image = URL['Base'] % image
if index < 4 or index == posterIndex :
image = image.replace('.webp', '.jpg') # change extension of url image
whRatio = 1.5 if index == 0 else 0.5625
imageType = 'Poster' if (index == 0 or index == posterIndex) else 'Art'
pic, picContent = utils.getFilmImages(imageType, image, whRatio) # height is 1.5 times the width for posters
if (index == 0 or posterIndex == index): # processing posters
# clean up and only keep the posters we have added
log('debug', 'fetch_images::%s | Adding poster - %s', metadata.id, image)
if referrer == True:
metadata.posters[pic] = Proxy.Media(picContent, sort_order=index + 1)
else:
metadata.posters[pic] = Proxy.Preview(picContent, sort_order=index + 1)
if index < 4 or len(metadata.art) < 4: # processing art
log('debug', 'fetch_images::%s | Adding art - %s', metadata.id, pic)
if referrer == True:
metadata.art[pic] = Proxy.Media(picContent, sort_order=index)
else:
metadata.art[pic] = Proxy.Preview(picContent, sort_order=index)
log('debug', 'fetch_images::%s | posters after - %s', metadata.id, len(metadata.posters))
log('debug', 'fetch_images::%s | arts after - %s', metadata.id, len(metadata.art))
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'Error in fetch_images::%s || %s', metadata.id, e)
log('error', LOG_STARLINE)
def update(self, metadata, media, lang):
log('info', LOG_BIGLINE)
log('info', '%s> update::init:%s', LOG_SUBLINE, metadata.id)
log('info', LOG_BIGLINE)
if metadata.tagline:
log('debug', 'update::%s | Contains tagline, url set to - %s', metadata.id, metadata.tagline)
url = metadata.tagline
metadata.id = metadata.tagline
else:
log('debug', 'update::%s | No tagline set for this metadata (%s)', metadata.id, metadata.tagline)
# Set tagline to URL
url = metadata.id
metadata.tagline = url
enclosing_directory, file_name = os.path.split(os.path.splitext(media.items[0].parts[0].file)[0])
file_name = file_name.lower()
log('debug', 'update::%s | File Name - %s', metadata.id, file_name)
if not media.items[0].parts[0].file:
return
file_path = media.items[0].parts[0].file
log('debug', 'update::%s | File Path - %s', metadata.id, file_path)
log('debug', 'update::%s | Fetching HTML from %s', metadata.id, url)
# Fetch HTML
log('debug', 'update::%s | Fetching HTML from %s', metadata.id, url)
html = HTML.ElementFromURL(url)
log('debug', 'update::%s | HTML fecthed', metadata.id)
# Set additional metadata
metadata.content_rating = 'X'
try:
self.fetch_studio(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Error in fetch_studio:: %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the title
try:
self.fetch_title_meta(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Error in fetch_title::fetch_title %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the release date
try:
self.fetch_date(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Error in fetch_date:: %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the summary
try:
self.fetch_summary(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Error in fetch_summary:: %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the cast
try:
self.fetch_cast(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Error in fetch_cast:: %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the genres
try:
self.fetch_genres(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'update::exception::%s | Exception in fetch_genres:: %s', metadata.id, e)
log('error', LOG_STARLINE)
pass
# Try to get the video images
try:
self.fetch_images(html, metadata)
except Exception as e:
log('error', LOG_STARLINE)
log('error', 'UPDATE - Exception in fetch_images:: %s', metadata.id)
log('error', LOG_STARLINE)
pass
log('info', '%s> update::%s - Success :) :) :)', LOG_SUBLINE, metadata.id)
log('info', LOG_BIGLINE) |
py | b4097501b636c0ca3bbc1d55eebffb8a5c07fc22 | # define a list
numbers = [2, 4, 6, 8, 10]
languages = ['Python', 'C#', 'C++', 'Java']
info = ['Timur', 1992, 61.5]
num_of_nums = [numbers, languages, info]
for i in num_of_nums:
print(i)
print()
mylist1 = [] # empty list
mylist2 = list() # empty list
print()
print('List output:')
print(numbers)
print(languages)
print()
print('the list() function')
numbers = list(range(5))
print(numbers)
print()
even_numbers = list(range(0, 10, 2)) # the list contains the even numbers 0, 2, 4, 6, 8
odd_numbers = list(range(1, 10, 2)) # the list contains the odd numbers 1, 3, 5, 7, 9
print(even_numbers, odd_numbers)
print()
s = 'abcde'
chars = list(s) # the list contains the characters 'a', 'b', 'c', 'd', 'e'
print(chars)
print()
|
py | b409764340e31dfc63f23a96ee1148d2ffab8ce5 | # --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Yuwen Xiong, Xizhou Zhu
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.proposal import *
from operator_py.proposal_target import *
from operator_py.box_annotator_ohem import *
from operator_py.rpn_inv_normalize import *
from operator_py.tile_as import *
class resnet_v1_101_flownet_deeplab(Symbol):
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.eps = 1e-5
self.use_global_stats = True
self.workspace = 512
self.units = (3, 4, 23, 3) # use for 101
self.filter_list = [256, 512, 1024, 2048]
def get_resnet_dcn(self, data):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2),
no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps = self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3),
stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
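# conv4 stage: res4a downsamples with stride 2, pairing a 1024-channel projection shortcut
# (res4a_branch1) with a 256 -> 256 -> 1024 bottleneck.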
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
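# conv4 identity blocks: res4b1-res4b22 repeat the 1x1(256) -> 3x3(256) -> 1x1(1024) bottleneck
# with identity shortcuts.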
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
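# conv5 stage: the projection shortcut res5a_branch1 uses stride 1, so spatial resolution is not
# reduced further; the 3x3 convolutions in res5a-res5c are deformable, with dilation 2 and pad 2
# compensating for the removed downsampling.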
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
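# Offset branch for the deformable 3x3 convolution: a regular 3x3 convolution predicts
# 18 = 2 * 3 * 3 per-location (x, y) offsets; its bias is trained with lr_mult=2.0.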
res5a_branch2b_offset_weight = mx.symbol.Variable('res5a_branch2b_offset_weight', lr_mult=1.0)
res5a_branch2b_offset_bias = mx.symbol.Variable('res5a_branch2b_offset_bias', lr_mult=2.0)
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1),
weight=res5a_branch2b_offset_weight, bias=res5a_branch2b_offset_bias)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset,
num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=1,
stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
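# res5b and res5c repeat the deformable bottleneck of res5a, but with identity shortcuts
# instead of a projection.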
res5b_branch2b_offset_weight = mx.symbol.Variable('res5b_branch2b_offset_weight', lr_mult=1.0)
res5b_branch2b_offset_bias = mx.symbol.Variable('res5b_branch2b_offset_bias', lr_mult=2.0)
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1),
weight=res5b_branch2b_offset_weight, bias=res5b_branch2b_offset_bias)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset,
num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=1,
stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
res5c_branch2b_offset_weight = mx.symbol.Variable('res5c_branch2b_offset_weight', lr_mult=1.0)
res5c_branch2b_offset_bias = mx.symbol.Variable('res5c_branch2b_offset_bias', lr_mult=2.0)
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1),
weight=res5c_branch2b_offset_weight, bias=res5c_branch2b_offset_bias)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset,
num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=1,
stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res5c_relu
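
# Plain ResNet v1 backbone built from the raw input `data`: 7x7/2 stem convolution + BN + ReLU,
# 3x3/2 max pooling, then bottleneck residual stages starting at res2a. BatchNorm layers here use
# self.use_global_stats and self.eps.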
def get_resnet_v1(self, data):
conv1 = mx.symbol.Convolution(name='conv1', data=data , num_filter=64, pad=(3,3), kernel=(7,7), stride=(2,2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1 , act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu , pad=(1,1), kernel=(3,3), stride=(2,2), pool_type='max')
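# conv2 stage: res2a pairs a 256-channel projection shortcut with a 64 -> 64 -> 256 bottleneck;
# res2b and res2c are identity bottlenecks.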
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1 , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1 , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a , act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b , act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1,scale2a_branch2c] )
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a , act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a , act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b , act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu,scale2b_branch2c] )
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b , act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a , act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b , act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu,scale2c_branch2c] )
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c , act_type='relu')
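# conv3 stage: res3a downsamples with stride 2 via a 512-channel projection shortcut;
# res3b1-res3b3 are identity bottlenecks (128 -> 128 -> 512).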
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a , act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b , act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1,scale3a_branch2c] )
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a , act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a , act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b , act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu,scale3b1_branch2c] )
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1 , act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a , act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b , act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu,scale3b2_branch2c] )
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2 , act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a , act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b , act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu,scale3b3_branch2c] )
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3 , act_type='relu')
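# conv4 stage: res4a downsamples with stride 2 via a 1024-channel projection shortcut;
# identity bottlenecks (256 -> 256 -> 1024) follow from res4b1.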
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a , act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b , act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1,scale4a_branch2c] )
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a , act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a , act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b , act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu,scale4b1_branch2c] )
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1 , act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a , act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b , act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu,scale4b2_branch2c] )
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2 , act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a , act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b , act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu,scale4b3_branch2c] )
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3 , act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a , act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b , act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu,scale4b4_branch2c] )
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4 , act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a , act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b , act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu,scale4b5_branch2c] )
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5 , act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a , act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b , act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu,scale4b6_branch2c] )
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6 , act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a , act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b , act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu,scale4b7_branch2c] )
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7 , act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a , act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b , act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu,scale4b8_branch2c] )
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8 , act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a , act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b , act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu,scale4b9_branch2c] )
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9 , act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a , act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b , act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu,scale4b10_branch2c] )
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10 , act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a , act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b , act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu,scale4b11_branch2c] )
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11 , act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a , act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b , act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu,scale4b12_branch2c] )
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12 , act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a , act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b , act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu,scale4b13_branch2c] )
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13 , act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a , act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b , act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu,scale4b14_branch2c] )
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14 , act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a , act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b , act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu,scale4b15_branch2c] )
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15 , act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a , act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b , act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu,scale4b16_branch2c] )
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16 , act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a , act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b , act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu,scale4b17_branch2c] )
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17 , act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a , act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b , act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu,scale4b18_branch2c] )
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18 , act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a , act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b , act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu,scale4b19_branch2c] )
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19 , act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a , act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b , act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu,scale4b20_branch2c] )
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20 , act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a , act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b , act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu,scale4b21_branch2c] )
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21 , act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a , act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b , act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu,scale4b22_branch2c] )
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22 , act_type='relu')
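# stage 5 (res5a-res5c) below keeps stride 1 and uses dilated 3x3 convolutions (dilate=2), so the feature stride stays at 16 for dense prediction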
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a , act_type='relu')
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b , act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1,scale5a_branch2c] )
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a , act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a , act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b , act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu,scale5b_branch2c] )
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b , act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a , act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b , act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu,scale5c_branch2c] )
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c , act_type='relu')
return res5c_relu
# feat_conv_3x3 = mx.sym.Convolution(
# data=res5c_relu, kernel=(3, 3), pad=(6, 6), dilate=(6, 6), num_filter=1024, name="feat_conv_3x3")
# feat_conv_3x3_relu = mx.sym.Activation(data=feat_conv_3x3, act_type="relu", name="feat_conv_3x3_relu")
# return feat_conv_3x3_relu
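# optical-flow sub-network (roughly FlowNet-S style): the two frames are concatenated, average-pooled to half
# resolution, run through a strided convolutional encoder and a deconvolutional decoder with skip connections
# and intermediate flow predictions; returns the predicted flow plus a per-position scale map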
def get_flownet(self, img_cur, img_ref):
data = mx.symbol.Concat(img_cur / 255.0, img_ref / 255.0, dim=1)
resize_data = mx.symbol.Pooling(name='resize_data', data=data , pooling_convention='full', pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
flow_conv1 = mx.symbol.Convolution(name='flow_conv1', data=resize_data , num_filter=64, pad=(3,3), kernel=(7,7), stride=(2,2), no_bias=False)
ReLU1 = mx.symbol.LeakyReLU(name='ReLU1', data=flow_conv1 , act_type='leaky', slope=0.1)
conv2 = mx.symbol.Convolution(name='conv2', data=ReLU1 , num_filter=128, pad=(2,2), kernel=(5,5), stride=(2,2), no_bias=False)
ReLU2 = mx.symbol.LeakyReLU(name='ReLU2', data=conv2 , act_type='leaky', slope=0.1)
conv3 = mx.symbol.Convolution(name='conv3', data=ReLU2 , num_filter=256, pad=(2,2), kernel=(5,5), stride=(2,2), no_bias=False)
ReLU3 = mx.symbol.LeakyReLU(name='ReLU3', data=conv3 , act_type='leaky', slope=0.1)
conv3_1 = mx.symbol.Convolution(name='conv3_1', data=ReLU3 , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
ReLU4 = mx.symbol.LeakyReLU(name='ReLU4', data=conv3_1 , act_type='leaky', slope=0.1)
conv4 = mx.symbol.Convolution(name='conv4', data=ReLU4 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(2,2), no_bias=False)
ReLU5 = mx.symbol.LeakyReLU(name='ReLU5', data=conv4 , act_type='leaky', slope=0.1)
conv4_1 = mx.symbol.Convolution(name='conv4_1', data=ReLU5 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
ReLU6 = mx.symbol.LeakyReLU(name='ReLU6', data=conv4_1 , act_type='leaky', slope=0.1)
conv5 = mx.symbol.Convolution(name='conv5', data=ReLU6 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(2,2), no_bias=False)
ReLU7 = mx.symbol.LeakyReLU(name='ReLU7', data=conv5 , act_type='leaky', slope=0.1)
conv5_1 = mx.symbol.Convolution(name='conv5_1', data=ReLU7 , num_filter=512, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
ReLU8 = mx.symbol.LeakyReLU(name='ReLU8', data=conv5_1 , act_type='leaky', slope=0.1)
conv6 = mx.symbol.Convolution(name='conv6', data=ReLU8 , num_filter=1024, pad=(1,1), kernel=(3,3), stride=(2,2), no_bias=False)
ReLU9 = mx.symbol.LeakyReLU(name='ReLU9', data=conv6 , act_type='leaky', slope=0.1)
conv6_1 = mx.symbol.Convolution(name='conv6_1', data=ReLU9 , num_filter=1024, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
ReLU10 = mx.symbol.LeakyReLU(name='ReLU10', data=conv6_1 , act_type='leaky', slope=0.1)
Convolution1 = mx.symbol.Convolution(name='Convolution1', data=ReLU10 , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
deconv5 = mx.symbol.Deconvolution(name='deconv5', data=ReLU10 , num_filter=512, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_deconv5 = mx.symbol.Crop(name='crop_deconv5', *[deconv5,ReLU8] , offset=(1,1))
ReLU11 = mx.symbol.LeakyReLU(name='ReLU11', data=crop_deconv5 , act_type='leaky', slope=0.1)
upsample_flow6to5 = mx.symbol.Deconvolution(name='upsample_flow6to5', data=Convolution1 , num_filter=2, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_upsampled_flow6_to_5 = mx.symbol.Crop(name='crop_upsampled_flow6_to_5', *[upsample_flow6to5,ReLU8] , offset=(1,1))
Concat2 = mx.symbol.Concat(name='Concat2', *[ReLU8,ReLU11,crop_upsampled_flow6_to_5] )
Convolution2 = mx.symbol.Convolution(name='Convolution2', data=Concat2 , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
deconv4 = mx.symbol.Deconvolution(name='deconv4', data=Concat2 , num_filter=256, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_deconv4 = mx.symbol.Crop(name='crop_deconv4', *[deconv4,ReLU6] , offset=(1,1))
ReLU12 = mx.symbol.LeakyReLU(name='ReLU12', data=crop_deconv4 , act_type='leaky', slope=0.1)
upsample_flow5to4 = mx.symbol.Deconvolution(name='upsample_flow5to4', data=Convolution2 , num_filter=2, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_upsampled_flow5_to_4 = mx.symbol.Crop(name='crop_upsampled_flow5_to_4', *[upsample_flow5to4,ReLU6] , offset=(1,1))
Concat3 = mx.symbol.Concat(name='Concat3', *[ReLU6,ReLU12,crop_upsampled_flow5_to_4] )
Convolution3 = mx.symbol.Convolution(name='Convolution3', data=Concat3 , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
deconv3 = mx.symbol.Deconvolution(name='deconv3', data=Concat3 , num_filter=128, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_deconv3 = mx.symbol.Crop(name='crop_deconv3', *[deconv3,ReLU4] , offset=(1,1))
ReLU13 = mx.symbol.LeakyReLU(name='ReLU13', data=crop_deconv3 , act_type='leaky', slope=0.1)
upsample_flow4to3 = mx.symbol.Deconvolution(name='upsample_flow4to3', data=Convolution3 , num_filter=2, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_upsampled_flow4_to_3 = mx.symbol.Crop(name='crop_upsampled_flow4_to_3', *[upsample_flow4to3,ReLU4] , offset=(1,1))
Concat4 = mx.symbol.Concat(name='Concat4', *[ReLU4,ReLU13,crop_upsampled_flow4_to_3] )
Convolution4 = mx.symbol.Convolution(name='Convolution4', data=Concat4 , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
deconv2 = mx.symbol.Deconvolution(name='deconv2', data=Concat4 , num_filter=64, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_deconv2 = mx.symbol.Crop(name='crop_deconv2', *[deconv2,ReLU2] , offset=(1,1))
ReLU14 = mx.symbol.LeakyReLU(name='ReLU14', data=crop_deconv2 , act_type='leaky', slope=0.1)
upsample_flow3to2 = mx.symbol.Deconvolution(name='upsample_flow3to2', data=Convolution4 , num_filter=2, pad=(0,0), kernel=(4,4), stride=(2,2), no_bias=False)
crop_upsampled_flow3_to_2 = mx.symbol.Crop(name='crop_upsampled_flow3_to_2', *[upsample_flow3to2,ReLU2] , offset=(1,1))
Concat5 = mx.symbol.Concat(name='Concat5', *[ReLU2,ReLU14,crop_upsampled_flow3_to_2] )
Concat5 = mx.symbol.Pooling(name='resize_concat5', data=Concat5 , pooling_convention='full', pad=(0,0), kernel=(2,2), stride=(2,2), pool_type='avg')
Convolution5 = mx.symbol.Convolution(name='Convolution5', data=Concat5 , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=False)
Convolution5_scale_bias = mx.sym.Variable(name='Convolution5_scale_bias', lr_mult=0.0)
Convolution5_scale = mx.symbol.Convolution(name='Convolution5_scale', data=Concat5 , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1),
bias=Convolution5_scale_bias, no_bias=False)
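# the predicted flow is returned scaled by 2.5, together with a 1024-channel position-wise scale map
# that can be multiplied onto warped features (see get_batch_test_symbol)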
return Convolution5 * 2.5, Convolution5_scale
def get_train_symbol(self, cfg):
# config alias for convenience
num_classes = cfg.dataset.NUM_CLASSES
data = mx.sym.Variable(name="data")
data_ref = mx.sym.Variable(name="data_ref")
eq_flag = mx.sym.Variable(name="eq_flag")
seg_cls_gt = mx.symbol.Variable(name='label')
# shared convolutional layers
conv_feat = self.get_resnet_v1(data_ref)
flow, scale_map = self.get_flownet(data, data_ref)
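# warp the reference-frame features to the current frame: the predicted flow becomes a sampling grid
# that is applied to conv_feat with a bilinear sampler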
flow_grid = mx.sym.GridGenerator(data=flow, transform_type='warp', name='flow_grid')
warp_conv_feat = mx.sym.BilinearSampler(data=conv_feat, grid=flow_grid, name='warping_feat')
# warp_conv_feat = warp_conv_feat * scale_map
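# eq_flag indexes into [warped features, reference features], so the un-warped features are used
# e.g. when the current frame is the reference frame itself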
select_conv_feat = mx.sym.take(mx.sym.Concat(*[warp_conv_feat, conv_feat], dim=0), eq_flag)
# conv_feats = mx.sym.SliceChannel(select_conv_feat, axis=1, num_outputs=2)
# subsequent fc layers by haozhi
fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0)
fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0)
fc6 = mx.symbol.Convolution(data=select_conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="fc6",
bias=fc6_bias, weight=fc6_weight, workspace=self.workspace)
relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6')
score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0)
score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0)
score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name="score",
bias=score_bias, weight=score_weight, workspace=self.workspace)
upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16),
num_group=num_classes, no_bias=True, name='upsampling',
attr={'lr_mult': '0.0'}, workspace=self.workspace)
croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score')
softmax = mx.symbol.SoftmaxOutput(data=croped_score, label=seg_cls_gt, normalization='valid', multi_output=True,
use_ignore=True, ignore_label=255, name="softmax")
group = mx.sym.Group([softmax, data_ref, eq_flag])
self.sym = group
return group
def get_key_test_symbol(self, cfg):
# config alias for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
data = mx.sym.Variable(name="data")
data_key = mx.sym.Variable(name="data_key")
feat_key = mx.sym.Variable(name="feat_key")
# shared convolutional layers
conv_feat = self.get_resnet_dcn(data)
# deeplab
fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0)
fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0)
fc6 = mx.symbol.Convolution(
data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="fc6", bias=fc6_bias, weight=fc6_weight,
workspace=self.workspace)
relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6')
score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0)
score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0)
score = mx.symbol.Convolution(
data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name="score", bias=score_bias,
weight=score_weight, workspace=self.workspace)
upsampling = mx.symbol.Deconvolution(
data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True,
name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace)
croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score')
# softmax = mx.symbol.SoftmaxOutput(data=croped_score, normalization='valid', multi_output=True, use_ignore=True,
# ignore_label=255, name="softmax")
group = mx.sym.Group([data_key, feat_key, conv_feat, croped_score])
self.sym = group
return group
def get_cur_test_symbol(self, cfg):
# config alias for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
data_cur = mx.sym.Variable(name="data")
data_key = mx.sym.Variable(name="data_key")
conv_feat = mx.sym.Variable(name="feat_key")
# conv_feat = self.get_resnet_v1(data_cur)
# feat_conv_3x3 = mx.sym.Convolution(
# data=conv_feat, kernel=(3, 3), pad=(6, 6), dilate=(6, 6), num_filter=1024, name="feat_conv_3x3")
# conv_feat = mx.sym.Activation(data=feat_conv_3x3, act_type="relu", name="feat_conv_3x3_relu")
# shared convolutional layers
flow, scale_map = self.get_flownet(data_cur, data_key)
flow_grid = mx.sym.GridGenerator(data=flow, transform_type='warp', name='flow_grid')
conv_feat = mx.sym.BilinearSampler(data=conv_feat, grid=flow_grid, name='warping_feat')
# conv_feat = conv_feat * scale_map
# deeplab
fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0)
fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0)
fc6 = mx.symbol.Convolution(
data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="fc6", bias=fc6_bias, weight=fc6_weight,
workspace=self.workspace)
relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6')
score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0)
score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0)
score = mx.symbol.Convolution(
data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name="score", bias=score_bias,
weight=score_weight, workspace=self.workspace)
upsampling = mx.symbol.Deconvolution(
data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True,
name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace)
croped_score = mx.symbol.Crop(*[upsampling, data_cur], offset=(8, 8), name='croped_score')
# softmax = mx.symbol.SoftmaxOutput(data=croped_score, normalization='valid', multi_output=True, use_ignore=True,
# ignore_label=255, name="softmax")
group = mx.sym.Group([data_key, conv_feat, croped_score])
self.sym = group
return group
def get_batch_test_symbol(self, cfg):
# config alias for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
data_key = mx.sym.Variable(name="data_key")
data_other = mx.sym.Variable(name="data_other")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
conv_feat_key = self.get_resnet_v1(data_key)
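# the custom 'tile_as' op replicates the single key frame and its features along the batch axis
# so their shapes match data_other before flow estimation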
data_key_tiled = mx.sym.Custom(data_content=data_key, data_shape=data_other, op_type='tile_as')
conv_feat_key_tiled = mx.sym.Custom(data_content=conv_feat_key, data_shape=data_other, op_type='tile_as')
flow, scale_map = self.get_flownet(data_other, data_key_tiled)
flow_grid = mx.sym.GridGenerator(data=flow, transform_type='warp', name='flow_grid')
conv_feat_other = mx.sym.BilinearSampler(data=conv_feat_key_tiled, grid=flow_grid, name='warping_feat')
conv_feat_other = conv_feat_other * scale_map
conv_feat = mx.symbol.Concat(conv_feat_key, conv_feat_other, dim=0)
conv_feats = mx.sym.SliceChannel(conv_feat, axis=1, num_outputs=2)
# RPN
rpn_feat = conv_feats[0]
rpn_cls_score = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
if cfg.network.NORMALIZE_RPN:
rpn_bbox_pred = mx.sym.Custom(
bbox_pred=rpn_bbox_pred, op_type='rpn_inv_normalize', num_anchors=num_anchors,
bbox_mean=cfg.network.ANCHOR_MEANS, bbox_std=cfg.network.ANCHOR_STDS)
# ROI Proposal
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.sym.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if cfg.TEST.CXX_PROPOSAL:
rois = mx.contrib.sym.MultiProposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
else:
raise NotImplementedError
# res5
rfcn_feat = conv_feats[1]
rfcn_cls = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7,
output_dim=8, spatial_scale=0.0625)
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
# classification
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
# reshape output
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
# group output
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight(self, cfg, arg_params, aux_params):
arg_params['fc6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc6_weight'])
arg_params['fc6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc6_bias'])
arg_params['score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['score_weight'])
arg_params['score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['score_bias'])
arg_params['upsampling_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['upsampling_weight'])
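# the 16x upsampling deconvolution gets a fixed bilinear interpolation kernel (its lr_mult is 0 in the symbol definition)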
init = mx.init.Initializer()
init._init_bilinear('upsample_weight', arg_params['upsampling_weight'])
# arg_params['Convolution5_scale_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['Convolution5_scale_weight'])
# arg_params['Convolution5_scale_bias'] = mx.nd.ones(shape=self.arg_shape_dict['Convolution5_scale_bias'])
|
py | b40976f312a95a3be926607196da44ff297b0b03 | # Copyright 2021 Invana
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RequestStateTypes:
STARTED = "STARTED"
RESPONSE_RECEIVED = "RESPONSE_RECEIVED"  # this status can occur multiple times during async execution
FINISHED = "FINISHED"
SERVER_DISCONNECTED = "SERVER_DISCONNECTED"
RUNTIME_ERROR = "RUNTIME_ERROR"
CLIENT_CONNECTION_ERROR = "CLIENT_CONNECTION_ERROR"
@classmethod
def get_allowed_types(cls):
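# collect the uppercase constants defined on the class, e.g. ['STARTED', 'RESPONSE_RECEIVED', ...]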
return [k for k in list(cls.__dict__.keys()) if not k.startswith("__") and k.isupper()]
class QueryResponseStatusTypes:
SUCCESS = "SUCCESS"
FAILED = "FAILED"
@classmethod
def get_allowed_types(cls):
return [k for k in list(cls.__dict__.keys()) if not k.startswith("__") and k.isupper()]
class QueryResponseErrorReasonTypes:
# these are error statuses for when a query response is received
TIMED_OUT = "TIMED_OUT"
INVALID_QUERY = "INVALID_QUERY"
OTHER = "OTHER"
@classmethod
def get_allowed_types(cls):
return [k for k in list(cls.__dict__.keys()) if not k.startswith("__") and k.isupper()]
class GremlinServerErrorStatusCodes:
ERROR_401 = "UNAUTHORIZED"
ERROR_403 = "FORBIDDEN"
ERROR_407 = "AUTHENTICATE"
ERROR_497 = "REQUEST ERROR SERIALIZATION"
ERROR_498 = "REQUEST ERROR MALFORMED REQUEST"
ERROR_499 = "REQUEST ERROR INVALID REQUEST ARGUMENTS"
ERROR_500 = "SERVER ERROR"
ERROR_596 = "SERVER ERROR TEMPORARY"
ERROR_597 = "SERVER ERROR EVALUATION"
ERROR_598 = "SERVER ERROR TIMEOUT"
ERROR_599 = "SERVER ERROR SERIALIZATION"
class ConnectionStateTypes:
CONNECTED = "CONNECTED"
CONNECTING = "CONNECTING"
RECONNECTING = "RECONNECTING"
DISCONNECTING = "DISCONNECTING"
DISCONNECTED = "DISCONNECTED"
|
py | b409771ae9cd71a1ad40e4823d3bc98e1503fbc7 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class RestaurantPrinter(models.Model):
_inherit = 'restaurant.printer'
printer_type = fields.Selection(selection_add=[('epson_epos', 'Use an Epson printer')])
epson_printer_ip = fields.Char(string='Epson Receipt Printer IP Address', help="Local IP address of an Epson receipt printer.")
|
py | b409780ffa7c49bf46714f582ee555ffe2b812e6 | import brew_view as bv
bv.TESTING = True
|
py | b409783abd55b47eb299e545497c75d1fda3e508 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceView(Model):
"""The instance view of a virtual machine scale set.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar virtual_machine: The instance view status summary for the virtual
machine scale set.
:vartype virtual_machine:
~azure.mgmt.compute.v2018_06_01.models.VirtualMachineScaleSetInstanceViewStatusesSummary
:ivar extensions: The extensions information.
:vartype extensions:
list[~azure.mgmt.compute.v2018_06_01.models.VirtualMachineScaleSetVMExtensionsSummary]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2018_06_01.models.InstanceViewStatus]
"""
_validation = {
'virtual_machine': {'readonly': True},
'extensions': {'readonly': True},
}
_attribute_map = {
'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetInstanceView, self).__init__(**kwargs)
self.virtual_machine = None
self.extensions = None
self.statuses = kwargs.get('statuses', None)
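# illustrative usage (statuses is the only writable field; the read-only summaries are filled in by the service):
#   view = VirtualMachineScaleSetInstanceView(statuses=[InstanceViewStatus(code='ProvisioningState/succeeded')])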
|
py | b4097845ce16d474d46518b14b5f9b19c7515051 | '''
Various small, useful functions which have no other home.
'''
import dpkt
def inet_ntoa(ip):
if len(ip) != 4:
raise ValueError("Incorrect IP")
return '.'.join(str(ord(c)) for c in ip)
def friendly_tcp_flags(flags):
'''
returns a string containing a user-friendly representation of the tcp flags
'''
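# e.g. friendly_tcp_flags(dpkt.tcp.TH_SYN | dpkt.tcp.TH_ACK) -> 'SYN|ACK' (flag order follows the dict's iteration order)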
# create mapping of flags to string repr's
d = {dpkt.tcp.TH_FIN:'FIN', dpkt.tcp.TH_SYN:'SYN', dpkt.tcp.TH_RST:'RST', dpkt.tcp.TH_PUSH:'PUSH', dpkt.tcp.TH_ACK:'ACK', dpkt.tcp.TH_URG:'URG', dpkt.tcp.TH_ECE:'ECE', dpkt.tcp.TH_CWR:'CWR'}
#make a list of the flags that are activated
active_flags = filter(lambda t: t[0] & flags, d.iteritems())
#join all their string representations with '|'
return '|'.join(t[1] for t in active_flags)
def friendly_socket(sock):
'''
returns a socket where the addresses are converted by inet_ntoa into
human-friendly strings. sock is in tuple format, like
((sip, sport),(dip, sport))
'''
return '((%s, %d), (%s, %d))' % (
inet_ntoa(sock[0][0]),
sock[0][1],
inet_ntoa(sock[1][0]),
sock[1][1]
)
def friendly_data(str):
'''
convert (possibly binary) data into a form readable by people on terminals
'''
return `str`
def ms_from_timedelta(td):
'''
gets the number of ms in td, which is datetime.timedelta.
Modified from here:
http://docs.python.org/library/datetime.html#datetime.timedelta, near the
end of the section.
'''
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**3
def ms_from_dpkt_time(td):
'''
Get milliseconds from a dpkt timestamp. This should normally only be
applied to the difference of two dpkt timestamps.
'''
return int(td * 1000)  # dpkt timestamps are floating-point seconds, so scale by 1000 for milliseconds
class ModifiedReader(object):
"""
A copy of the dpkt pcap Reader. The only change is that the iterator
yields the pcap packet header as well, so it's possible to check the true
frame length, among other things.
stolen from pyper.
"""
def __init__(self, fileobj):
self.__f = fileobj
buf = self.__f.read(dpkt.pcap.FileHdr.__hdr_len__)
self.__fh = dpkt.pcap.FileHdr(buf)
self.__ph = dpkt.pcap.PktHdr
if self.__fh.magic == dpkt.pcap.PMUDPCT_MAGIC:
self.__fh = dpkt.pcap.LEFileHdr(buf)
self.__ph = dpkt.pcap.LEPktHdr
elif self.__fh.magic != dpkt.pcap.TCPDUMP_MAGIC:
raise ValueError, 'invalid tcpdump header'
self.snaplen = self.__fh.snaplen
self.dloff = dpkt.pcap.dltoff[self.__fh.linktype]
self.filter = ''
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
return NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
if cnt > 0:
for i in range(cnt):
ts, pkt = self.next()
callback(ts, pkt, *args)
else:
for ts, pkt in self:
callback(ts, pkt, *args)
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
self.__f.seek(dpkt.pcap.FileHdr.__hdr_len__)
while 1:
buf = self.__f.read(dpkt.pcap.PktHdr.__hdr_len__)
if not buf: break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.caplen)
yield (hdr.tv_sec + (hdr.tv_usec / 1000000.0), buf, hdr)
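# Example (sketch): iterating a capture with ModifiedReader. Unlike dpkt's stock Reader, each
# item also carries the pcap record header, so the captured length can be compared with the
# on-the-wire length. 'capture.pcap' is a placeholder path, not a file from this project.
# f = open('capture.pcap', 'rb')
# for ts, buf, hdr in ModifiedReader(f):
#     print ts, hdr.caplen, hdr.len, friendly_data(buf[:16])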
|
py | b409786dc071ebaf02765d9ced4a7610874a24a0 | """The tests for the Group Light platform."""
from os import path
import unittest.mock
from unittest.mock import MagicMock, patch
from homeassistant import config as hass_config
from homeassistant.components.group import DOMAIN, SERVICE_RELOAD
import homeassistant.components.group.light as group
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
DOMAIN as LIGHT_DOMAIN,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
async def test_default_state(hass):
"""Test light group default state."""
hass.states.async_set("light.kitchen", "on")
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.kitchen", "light.bedroom"],
"name": "Bedroom Group",
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.bedroom_group")
assert state is not None
assert state.state == STATE_ON
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes.get(ATTR_ENTITY_ID) == ["light.kitchen", "light.bedroom"]
assert state.attributes.get(ATTR_BRIGHTNESS) is None
assert state.attributes.get(ATTR_HS_COLOR) is None
assert state.attributes.get(ATTR_COLOR_TEMP) is None
assert state.attributes.get(ATTR_WHITE_VALUE) is None
assert state.attributes.get(ATTR_EFFECT_LIST) is None
assert state.attributes.get(ATTR_EFFECT) is None
async def test_state_reporting(hass):
"""Test the state reporting."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("light.test1", STATE_ON)
hass.states.async_set("light.test2", STATE_UNAVAILABLE)
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_ON
hass.states.async_set("light.test1", STATE_ON)
hass.states.async_set("light.test2", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_ON
hass.states.async_set("light.test1", STATE_OFF)
hass.states.async_set("light.test2", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_OFF
hass.states.async_set("light.test1", STATE_UNAVAILABLE)
hass.states.async_set("light.test2", STATE_UNAVAILABLE)
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_UNAVAILABLE
async def test_brightness(hass):
"""Test brightness reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_BRIGHTNESS}
entity0.color_mode = COLOR_MODE_BRIGHTNESS
entity0.brightness = 255
entity1 = platform.ENTITIES[1]
entity1.supported_features = SUPPORT_BRIGHTNESS
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 255
assert state.attributes[ATTR_COLOR_MODE] == "brightness"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness"]
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id], ATTR_BRIGHTNESS: 100},
blocking=True,
)
await hass.async_block_till_done()
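    # The group reports the average brightness of its members that are on: (255 + 100) // 2 == 177.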
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 177
assert state.attributes[ATTR_COLOR_MODE] == "brightness"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness"]
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 100
assert state.attributes[ATTR_COLOR_MODE] == "brightness"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness"]
async def test_color_hs(hass):
"""Test hs color reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_HS}
entity0.color_mode = COLOR_MODE_HS
entity0.brightness = 255
entity0.hs_color = (0, 100)
entity1 = platform.ENTITIES[1]
entity1.supported_features = SUPPORT_COLOR
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_COLOR_MODE] == "hs"
assert state.attributes[ATTR_HS_COLOR] == (0, 100)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["hs"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id], ATTR_HS_COLOR: (0, 50)},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "hs"
assert state.attributes[ATTR_HS_COLOR] == (0, 75)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["hs"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "hs"
assert state.attributes[ATTR_HS_COLOR] == (0, 50)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["hs"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
async def test_color_rgbw(hass):
"""Test rgbw color reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_RGBW}
entity0.color_mode = COLOR_MODE_RGBW
entity0.brightness = 255
entity0.rgbw_color = (0, 64, 128, 255)
entity1 = platform.ENTITIES[1]
entity1.supported_color_modes = {COLOR_MODE_RGBW}
entity1.color_mode = COLOR_MODE_RGBW
entity1.brightness = 255
entity1.rgbw_color = (255, 128, 64, 0)
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_COLOR_MODE] == "rgbw"
assert state.attributes[ATTR_RGBW_COLOR] == (0, 64, 128, 255)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbw"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "rgbw"
assert state.attributes[ATTR_RGBW_COLOR] == (127, 96, 96, 127)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbw"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "rgbw"
assert state.attributes[ATTR_RGBW_COLOR] == (255, 128, 64, 0)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbw"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
async def test_color_rgbww(hass):
"""Test rgbww color reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_RGBWW}
entity0.color_mode = COLOR_MODE_RGBWW
entity0.brightness = 255
entity0.rgbww_color = (0, 32, 64, 128, 255)
entity1 = platform.ENTITIES[1]
entity1.supported_color_modes = {COLOR_MODE_RGBWW}
entity1.color_mode = COLOR_MODE_RGBWW
entity1.brightness = 255
entity1.rgbww_color = (255, 128, 64, 32, 0)
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.state == STATE_ON
assert state.attributes[ATTR_COLOR_MODE] == "rgbww"
assert state.attributes[ATTR_RGBWW_COLOR] == (0, 32, 64, 128, 255)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbww"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "rgbww"
assert state.attributes[ATTR_RGBWW_COLOR] == (127, 80, 64, 80, 127)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbww"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "rgbww"
assert state.attributes[ATTR_RGBWW_COLOR] == (255, 128, 64, 32, 0)
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["rgbww"]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
async def test_white_value(hass):
"""Test white value reporting."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set(
"light.test1", STATE_ON, {ATTR_WHITE_VALUE: 255, ATTR_SUPPORTED_FEATURES: 128}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 128
assert state.attributes[ATTR_WHITE_VALUE] == 255
hass.states.async_set(
"light.test2", STATE_ON, {ATTR_WHITE_VALUE: 100, ATTR_SUPPORTED_FEATURES: 128}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 128
assert state.attributes[ATTR_WHITE_VALUE] == 177
hass.states.async_set(
"light.test1", STATE_OFF, {ATTR_WHITE_VALUE: 255, ATTR_SUPPORTED_FEATURES: 128}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 128
assert state.attributes[ATTR_WHITE_VALUE] == 100
async def test_color_temp(hass):
"""Test color temp reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_COLOR_TEMP}
entity0.color_mode = COLOR_MODE_COLOR_TEMP
entity0.brightness = 255
entity0.color_temp = 2
entity1 = platform.ENTITIES[1]
entity1.supported_features = SUPPORT_COLOR_TEMP
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "color_temp"
assert state.attributes[ATTR_COLOR_TEMP] == 2
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["color_temp"]
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id], ATTR_COLOR_TEMP: 1000},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "color_temp"
assert state.attributes[ATTR_COLOR_TEMP] == 501
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["color_temp"]
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == "color_temp"
assert state.attributes[ATTR_COLOR_TEMP] == 1000
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == ["color_temp"]
async def test_emulated_color_temp_group(hass):
"""Test emulated color temperature in a group."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
platform.ENTITIES.append(platform.MockLight("test3", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_COLOR_TEMP}
entity0.color_mode = COLOR_MODE_COLOR_TEMP
entity1 = platform.ENTITIES[1]
entity1.supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS}
entity1.color_mode = COLOR_MODE_COLOR_TEMP
entity2 = platform.ENTITIES[2]
entity2.supported_color_modes = {COLOR_MODE_HS}
entity2.color_mode = COLOR_MODE_HS
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2", "light.test3"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.light_group", ATTR_COLOR_TEMP: 200},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.test1")
assert state.state == STATE_ON
assert state.attributes[ATTR_COLOR_TEMP] == 200
assert ATTR_HS_COLOR not in state.attributes.keys()
state = hass.states.get("light.test2")
assert state.state == STATE_ON
assert state.attributes[ATTR_COLOR_TEMP] == 200
assert ATTR_HS_COLOR not in state.attributes.keys()
state = hass.states.get("light.test3")
assert state.state == STATE_ON
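    # light.test3 only supports hs color, so the group's color_temp request is emulated for it:
    # 200 mired (about 5000 K) is translated into the hs value asserted below.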
assert state.attributes[ATTR_HS_COLOR] == (27.001, 19.243)
async def test_min_max_mireds(hass):
"""Test min/max mireds reporting.
min/max mireds is reported both when light is on and off
"""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_COLOR_TEMP}
entity0.color_mode = COLOR_MODE_COLOR_TEMP
entity0.color_temp = 2
entity0.min_mireds = 2
entity0.max_mireds = 5
entity1 = platform.ENTITIES[1]
entity1.supported_features = SUPPORT_COLOR_TEMP
entity1.min_mireds = 1
entity1.max_mireds = 1234567890
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_MIN_MIREDS] == 1
assert state.attributes[ATTR_MAX_MIREDS] == 1234567890
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_MIN_MIREDS] == 1
assert state.attributes[ATTR_MAX_MIREDS] == 1234567890
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_MIN_MIREDS] == 1
assert state.attributes[ATTR_MAX_MIREDS] == 1234567890
async def test_effect_list(hass):
"""Test effect_list reporting."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set(
"light.test1",
STATE_ON,
{ATTR_EFFECT_LIST: ["None", "Random", "Colorloop"], ATTR_SUPPORTED_FEATURES: 4},
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert set(state.attributes[ATTR_EFFECT_LIST]) == {"None", "Random", "Colorloop"}
# These ensure the output is sorted as expected
assert state.attributes[ATTR_EFFECT_LIST][0] == "None"
assert state.attributes[ATTR_EFFECT_LIST][1] == "Colorloop"
assert state.attributes[ATTR_EFFECT_LIST][2] == "Random"
hass.states.async_set(
"light.test2",
STATE_ON,
{ATTR_EFFECT_LIST: ["None", "Random", "Rainbow"], ATTR_SUPPORTED_FEATURES: 4},
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert set(state.attributes[ATTR_EFFECT_LIST]) == {
"None",
"Random",
"Colorloop",
"Rainbow",
}
hass.states.async_set(
"light.test1",
STATE_OFF,
{ATTR_EFFECT_LIST: ["None", "Colorloop", "Seven"], ATTR_SUPPORTED_FEATURES: 4},
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert set(state.attributes[ATTR_EFFECT_LIST]) == {
"None",
"Random",
"Colorloop",
"Seven",
"Rainbow",
}
async def test_effect(hass):
"""Test effect reporting."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.test1", "light.test2", "light.test3"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set(
"light.test1", STATE_ON, {ATTR_EFFECT: "None", ATTR_SUPPORTED_FEATURES: 6}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_EFFECT] == "None"
hass.states.async_set(
"light.test2", STATE_ON, {ATTR_EFFECT: "None", ATTR_SUPPORTED_FEATURES: 6}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_EFFECT] == "None"
hass.states.async_set(
"light.test3", STATE_ON, {ATTR_EFFECT: "Random", ATTR_SUPPORTED_FEATURES: 6}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_EFFECT] == "None"
hass.states.async_set(
"light.test1", STATE_OFF, {ATTR_EFFECT: "None", ATTR_SUPPORTED_FEATURES: 6}
)
hass.states.async_set(
"light.test2", STATE_OFF, {ATTR_EFFECT: "None", ATTR_SUPPORTED_FEATURES: 6}
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_EFFECT] == "Random"
async def test_supported_color_modes(hass):
"""Test supported_color_modes reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
platform.ENTITIES.append(platform.MockLight("test3", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS}
entity1 = platform.ENTITIES[1]
entity1.supported_color_modes = {COLOR_MODE_RGBW, COLOR_MODE_RGBWW}
entity2 = platform.ENTITIES[2]
entity2.supported_features = SUPPORT_BRIGHTNESS
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2", "light.test3"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert set(state.attributes[ATTR_SUPPORTED_COLOR_MODES]) == {
"brightness",
"color_temp",
"hs",
"rgbw",
"rgbww",
}
async def test_color_mode(hass):
"""Test color_mode reporting."""
platform = getattr(hass.components, "test.light")
platform.init(empty=True)
platform.ENTITIES.append(platform.MockLight("test1", STATE_ON))
platform.ENTITIES.append(platform.MockLight("test2", STATE_OFF))
platform.ENTITIES.append(platform.MockLight("test3", STATE_OFF))
entity0 = platform.ENTITIES[0]
entity0.supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS}
entity0.color_mode = COLOR_MODE_COLOR_TEMP
entity1 = platform.ENTITIES[1]
entity1.supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS}
entity1.color_mode = COLOR_MODE_COLOR_TEMP
entity2 = platform.ENTITIES[2]
entity2.supported_color_modes = {COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS}
entity2.color_mode = COLOR_MODE_HS
assert await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "test"},
{
"platform": DOMAIN,
"entities": ["light.test1", "light.test2", "light.test3"],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == COLOR_MODE_COLOR_TEMP
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity1.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == COLOR_MODE_COLOR_TEMP
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": [entity2.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == COLOR_MODE_COLOR_TEMP
await hass.services.async_call(
"light",
"turn_off",
{"entity_id": [entity0.entity_id, entity1.entity_id]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_COLOR_MODE] == COLOR_MODE_HS
async def test_supported_features(hass):
"""Test supported features reporting."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: {
"platform": DOMAIN,
"entities": ["light.test1", "light.test2"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("light.test1", STATE_ON, {ATTR_SUPPORTED_FEATURES: 0})
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
# SUPPORT_COLOR_TEMP = 2
# SUPPORT_COLOR_TEMP = 2 will be blocked in favour of COLOR_MODE_COLOR_TEMP
hass.states.async_set("light.test2", STATE_ON, {ATTR_SUPPORTED_FEATURES: 2})
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
# SUPPORT_TRANSITION | SUPPORT_FLASH | SUPPORT_BRIGHTNESS = 41
# SUPPORT_BRIGHTNESS = 1 will be translated to COLOR_MODE_BRIGHTNESS
hass.states.async_set("light.test1", STATE_OFF, {ATTR_SUPPORTED_FEATURES: 41})
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
# SUPPORT_TRANSITION | SUPPORT_FLASH = 40
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 40
# Test that unknown feature 256 is blocked
hass.states.async_set("light.test2", STATE_OFF, {ATTR_SUPPORTED_FEATURES: 256})
await hass.async_block_till_done()
state = hass.states.get("light.light_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 40
async def test_service_calls(hass):
"""Test service calls."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "demo"},
{
"platform": DOMAIN,
"entities": [
"light.bed_light",
"light.ceiling_lights",
"light.kitchen_lights",
],
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_ON
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TOGGLE,
{ATTR_ENTITY_ID: "light.light_group"},
blocking=True,
)
assert hass.states.get("light.bed_light").state == STATE_OFF
assert hass.states.get("light.ceiling_lights").state == STATE_OFF
assert hass.states.get("light.kitchen_lights").state == STATE_OFF
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.light_group"},
blocking=True,
)
assert hass.states.get("light.bed_light").state == STATE_ON
assert hass.states.get("light.ceiling_lights").state == STATE_ON
assert hass.states.get("light.kitchen_lights").state == STATE_ON
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.light_group"},
blocking=True,
)
assert hass.states.get("light.bed_light").state == STATE_OFF
assert hass.states.get("light.ceiling_lights").state == STATE_OFF
assert hass.states.get("light.kitchen_lights").state == STATE_OFF
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.light_group",
ATTR_BRIGHTNESS: 128,
ATTR_EFFECT: "Random",
ATTR_RGB_COLOR: (42, 255, 255),
},
blocking=True,
)
state = hass.states.get("light.bed_light")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 128
assert state.attributes[ATTR_EFFECT] == "Random"
assert state.attributes[ATTR_RGB_COLOR] == (42, 255, 255)
state = hass.states.get("light.ceiling_lights")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 128
assert state.attributes[ATTR_RGB_COLOR] == (42, 255, 255)
state = hass.states.get("light.kitchen_lights")
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 128
assert state.attributes[ATTR_RGB_COLOR] == (42, 255, 255)
async def test_invalid_service_calls(hass):
"""Test invalid service call arguments get discarded."""
add_entities = MagicMock()
await group.async_setup_platform(
hass, {"entities": ["light.test1", "light.test2"]}, add_entities
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert add_entities.call_count == 1
grouped_light = add_entities.call_args[0][0][0]
grouped_light.hass = hass
with unittest.mock.patch.object(hass.services, "async_call") as mock_call:
await grouped_light.async_turn_on(brightness=150, four_oh_four="404")
data = {ATTR_ENTITY_ID: ["light.test1", "light.test2"], ATTR_BRIGHTNESS: 150}
mock_call.assert_called_once_with(
LIGHT_DOMAIN, SERVICE_TURN_ON, data, blocking=True, context=None
)
mock_call.reset_mock()
await grouped_light.async_turn_off(transition=4, four_oh_four="404")
data = {ATTR_ENTITY_ID: ["light.test1", "light.test2"], ATTR_TRANSITION: 4}
mock_call.assert_called_once_with(
LIGHT_DOMAIN, SERVICE_TURN_OFF, data, blocking=True, context=None
)
mock_call.reset_mock()
data = {
ATTR_BRIGHTNESS: 150,
ATTR_XY_COLOR: (0.5, 0.42),
ATTR_RGB_COLOR: (80, 120, 50),
ATTR_COLOR_TEMP: 1234,
ATTR_WHITE_VALUE: 1,
ATTR_EFFECT: "Sunshine",
ATTR_TRANSITION: 4,
ATTR_FLASH: "long",
}
await grouped_light.async_turn_on(**data)
data[ATTR_ENTITY_ID] = ["light.test1", "light.test2"]
mock_call.assert_called_once_with(
LIGHT_DOMAIN, SERVICE_TURN_ON, data, blocking=True, context=None
)
async def test_reload(hass):
"""Test the ability to reload lights."""
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "demo"},
{
"platform": DOMAIN,
"entities": [
"light.bed_light",
"light.ceiling_lights",
"light.kitchen_lights",
],
},
]
},
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("light.light_group").state == STATE_ON
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"group/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("light.light_group") is None
assert hass.states.get("light.master_hall_lights_g") is not None
assert hass.states.get("light.outside_patio_lights_g") is not None
async def test_reload_with_platform_not_setup(hass):
"""Test the ability to reload lights."""
hass.states.async_set("light.bowl", STATE_ON)
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{"platform": "demo"},
]
},
)
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "light.Bowl", "icon": "mdi:work"},
}
},
)
await hass.async_block_till_done()
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"group/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("light.light_group") is None
assert hass.states.get("light.master_hall_lights_g") is not None
assert hass.states.get("light.outside_patio_lights_g") is not None
async def test_reload_with_base_integration_platform_not_setup(hass):
"""Test the ability to reload lights."""
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "light.Bowl", "icon": "mdi:work"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("light.master_hall_lights", STATE_ON)
hass.states.async_set("light.master_hall_lights_2", STATE_OFF)
hass.states.async_set("light.outside_patio_lights", STATE_OFF)
hass.states.async_set("light.outside_patio_lights_2", STATE_OFF)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"group/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("light.light_group") is None
assert hass.states.get("light.master_hall_lights_g") is not None
assert hass.states.get("light.outside_patio_lights_g") is not None
assert hass.states.get("light.master_hall_lights_g").state == STATE_ON
assert hass.states.get("light.outside_patio_lights_g").state == STATE_OFF
async def test_nested_group(hass):
"""Test nested light group."""
hass.states.async_set("light.kitchen", "on")
await async_setup_component(
hass,
LIGHT_DOMAIN,
{
LIGHT_DOMAIN: [
{
"platform": DOMAIN,
"entities": ["light.bedroom_group"],
"name": "Nested Group",
},
{
"platform": DOMAIN,
"entities": ["light.kitchen", "light.bedroom"],
"name": "Bedroom Group",
},
]
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("light.bedroom_group")
assert state is not None
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ENTITY_ID) == ["light.kitchen", "light.bedroom"]
state = hass.states.get("light.nested_group")
assert state is not None
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ENTITY_ID) == ["light.bedroom_group"]
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
py | b40978fa30a01c462d954e92cdbc505929730c32 | """ Boilerplate CLI """
import sys
from boilerplate import Hello
def main():
"""CLI entrypoint"""
args = sys.argv[1:]
hello = Hello()
message = hello.greet(*args)
print(message)
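# Conventional direct-execution guard (a packaged install would normally expose main() through a
# console_scripts entry point instead).
if __name__ == "__main__":
    main()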
|
py | b4097a2676c3dc30fcd9878f678ed7527b7af182 | """
Primitive operations for 3x3 orthonormal and 4x4 homogeneous matrices.
Python implementation by: Luis Fernando Lara Tobar and Peter Corke.
Based on original Robotics Toolbox for Matlab code by Peter Corke.
Permission to use and copy is granted provided that acknowledgement of
the authors is made.
@author: Luis Fernando Lara Tobar and Peter Corke
@revisor: Avelino Forechi updated to the most recent Matlab version
"""
from numpy import *
from utility import *
from numpy.linalg import norm
import math  # math.sin/cos/atan2 are used by euler_to_quat and quat_to_euler below
def rotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about X-axis
@see: L{roty}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[1, 0, 0],
[0, ct, -st],
[0, st, ct]])
def roty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Y-axis
@see: L{rotx}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]])
def rotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Z-axis
@see: L{rotx}, L{roty}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, -st, 0],
[st, ct, 0],
[ 0, 0, 1]])
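# Example (illustrative): a 90 degree rotation about Z maps the X-axis onto the Y-axis.
# >>> rotz(pi / 2) * mat([[1.], [0.], [0.]])   # approximately the column vector (0, 1, 0)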
def trotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about X-axis
@see: L{troty}, L{trotz}, L{rotx}
"""
return r2t(rotx(theta))
def troty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Y-axis
@see: L{troty}, L{trotz}, L{roty}
"""
return r2t(roty(theta))
def trotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Z-axis
@see: L{trotx}, L{troty}, L{rotz}
"""
return r2t(rotz(theta))
def tr2rpy(m):
"""
Extract RPY angles.
Returns a vector of RPY angles corresponding to the rotational part of
the homogeneous transform. The 3 angles correspond to rotations about
the Z, Y and X axes respectively.
@type m: 3x3 or 4x4 matrix
@param m: the rotation matrix
@rtype: 1x3 matrix
@return: RPY angles [S{theta} S{phi} S{psi}]
@see: L{rpy2tr}, L{tr2eul}
"""
m = mat(m)
if ishomog(m)==False:
error('input must be a homogeneous transform')
rpy = mat(zeros((1,3)))
if abs(abs(m[2,0])-1)<finfo(float).eps:
# singularity
rpy[0,0] = 0
rpy[0,1] = -arcsin(m[2,0])
if m[2,0] < 0:
rpy[0,2] = -arctan2(m[0,1], m[0,2])
else:
rpy[0,2] = arctan2(-m[0, 1], -m[0, 2])
else:
rpy[0,0] = arctan2(m[2,1],m[2,2])
rpy[0,1] = arctan2(-m[2,0]*cos(rpy[0,0]), m[2,2])
rpy[0,2] = arctan2(m[1,0], m[0,0])
return rpy
def rpy2r(roll, pitch=None,yaw=None,order='vehicle'):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2r(S{theta}, S{phi}, S{psi})
- R = rpy2r([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
    @rtype: 3x3 orthonormal rotation matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
n=1
if pitch==None and yaw==None:
roll= mat(roll)
if numcols(roll) != 3:
error('bad arguments')
n = numrows(roll)
pitch = roll[:,1]
yaw = roll[:,2]
roll = roll[:,0]
if n>1:
R = []
for i in range(0,n):
if order=='vehicle':
r = rotz(yaw[i,0]) * roty(pitch[i,0]) * rotx(roll[i,0])
else:
r = roty(yaw[i, 0]) * rotx(pitch[i, 0]) * rotz(roll[i, 0])
R.append(r)
return R
try:
if order == 'vehicle':
r = rotz(yaw[0,0]) * roty(pitch[0,0]) * rotx(roll[0,0])
else:
r = roty(yaw[0, 0]) * rotx(pitch[0, 0]) * rotz(roll[0, 0])
return r
except:
if order == 'vehicle':
r = rotz(yaw) * roty(pitch) * rotx(roll)
else:
r = roty(yaw) * rotx(pitch) * rotz(roll)
return r
def rpy2tr(roll, pitch=None, yaw=None):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2tr(r, p, y)
- R = rpy2tr([r, p, y])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
    @rtype: 4x4 homogeneous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
return r2t( rpy2r(roll, pitch, yaw) )
def tr2t(T):
if ishomog(T)==False:
error('input must be a homogeneous transform')
return T[0:3,3]
def t2r(T):
"""
Return rotational submatrix of a homogeneous transformation.
@type T: 4x4 homogeneous transformation
@param T: the transform matrix to convert
@rtype: 3x3 orthonormal rotation matrix
@return: rotation submatrix
"""
if ishomog(T)==False:
error('input must be a homogeneous transform')
return T[0:3,0:3]
def r2t(R):
"""
Convert a 3x3 orthonormal rotation matrix to a 4x4 homogeneous transformation::
T = | R 0 |
| 0 1 |
@type R: 3x3 orthonormal rotation matrix
@param R: the rotation matrix to convert
@rtype: 4x4 homogeneous matrix
@return: homogeneous equivalent
"""
return concatenate( (concatenate( (R, zeros((3,1))),1), mat([0,0,0,1])) )
def rt2tr(R, t):
"""
Convert rotation and translation to homogeneous transform
TR = RT2TR(R, t) is a homogeneous transformation matrix (N+1xN+1) formed
from an orthonormal rotation matrix R (NxN) and a translation vector t
(Nx1). Works for R in SO(2) or SO(3):
- If R is 2x2 and t is 2x1, then TR is 3x3
- If R is 3x3 and t is 3x1, then TR is 4x4
@see also L{T2R}, L{R2T}, L{TR2RT}.
@param R: 3x3 orthonormal rotation matrix
@param t: 3x1 vector
@return: T 4x4 homogeneous transformation
"""
if numcols(R) != numrows(R):
error('R must be square')
if numrows(R) != numrows(t):
error('R and t must have the same number of rows')
h = hstack( (zeros(numcols(R)), 1) )
T = hstack( (R, t) )
T = vstack( (T, h) )
return T
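# Example (illustrative): an identity rotation combined with a translation of (1, 2, 3).
# >>> rt2tr(rotz(0.0), mat([[1.], [2.], [3.]]))
# gives a 4x4 transform with the identity in its upper-left 3x3 block, (1, 2, 3) in the last
# column and a bottom row of [0, 0, 0, 1].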
def cam2tr():
'''
---------------------------------------
Y |
| |
| |
| |
W--------- X | C--------- Z
/ | | \
/ | | \
/ | | \
Z | X Y
---------------------------------------
:return: 4x4 homogeneous matrix
'''
transf = mat([[0, -1, 0], # camera x-axis wrt world
[0, 0, -1], # camera y-axis wrt world
[1, 0, 0]]) # camera z-axis wrt world
transf = hstack((transf, [[0], # translation along x
[0], # translation along y
[0]] # translation along z
))
transf = vstack((transf, [0, 0, 0, 1])) # homogeneous coordinate
return transf
def euler_to_quat(roll, pitch, yaw):
'''
https://openslam.informatik.uni-freiburg.de/data/svn/MTK/trunk/cpp/example/relation/euler_stable.hpp
Euler to Quaternion conversion.
:param roll:
pitch:
yaw:
:return: quaternion
'''
sr = math.sin(roll * 0.5)
cr = math.cos(roll * 0.5)
sp = math.sin(pitch * 0.5)
cp = math.cos(pitch * 0.5)
sy = math.sin(yaw * 0.5)
cy = math.cos(yaw * 0.5)
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return array((x, y, z, w))
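# Example (illustrative): a pure yaw rotation. With roll = pitch = 0 the quaternion reduces to
# (x, y, z, w) = (0, 0, sin(yaw / 2), cos(yaw / 2)), and quat_to_euler recovers (0, 0, yaw).
# >>> euler_to_quat(0.0, 0.0, math.pi / 2)   # approximately (0, 0, 0.7071, 0.7071)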
def quat_to_euler(quaternion):
'''
https://openslam.informatik.uni-freiburg.de/data/svn/MTK/trunk/cpp/example/relation/euler_stable.hpp
Convert Quaternion quat to Euler angles, gracefully handling the singularity for abs(pitch) = PI/2.
Passing the result of this method to the method above should always result in quat or -quat assuming quat was normalized.
This method also has no problems handling non-normalized Quaternions.
    :param quaternion: array-like quaternion (x, y, z, w)
    :return: array (roll, pitch, yaw) in radians
'''
# Get yaw angle:
qx = quaternion[0] # x
qy = quaternion[1] # y
qz = quaternion[2] # z
qw = quaternion[3] # w
qx2 = qx * qx
qy2 = qy * qy
qz2 = qz * qz
qw2 = qw * qw
# for abs(pitch) = PI/2 this will lead to atan2(0,0)
yaw = math.atan2(2.0 * (qw * qz + qx * qy), qw2 + qx2 - qy2 - qz2)
# Now rotate the original Quaternion backwards by yaw:
c = math.cos(yaw / 2.0)
s = math.sin(yaw / 2.0)
px = c * qx + s * qy
py = c * qy - s * qx
pz = c * qz - s * qw
pw = c * qw + s * qz
px2 = px * px
py2 = py * py
pz2 = pz * pz
pw2 = pw * pw
# Now calculating pitch and roll does not have singularities anymore:
pitch = math.atan2(2 * (py * pw - px * pz), px2 + pw2 - py2 - pz2)
roll = math.atan2(2 * (px * pw - py * pz), py2 + pw2 - px2 - pz2)
return array((roll, pitch, yaw)) |
py | b4097b24dfd3e653d4f4e88acc82947c6c677a5f | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerState(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'running': 'V1ContainerStateRunning',
'terminated': 'V1ContainerStateTerminated',
'waiting': 'V1ContainerStateWaiting'
}
attribute_map = {
'running': 'running',
'terminated': 'terminated',
'waiting': 'waiting'
}
def __init__(self, running=None, terminated=None, waiting=None):
"""
V1ContainerState - a model defined in Swagger
"""
self._running = None
self._terminated = None
self._waiting = None
self.discriminator = None
if running is not None:
self.running = running
if terminated is not None:
self.terminated = terminated
if waiting is not None:
self.waiting = waiting
@property
def running(self):
"""
Gets the running of this V1ContainerState.
Details about a running container
:return: The running of this V1ContainerState.
:rtype: V1ContainerStateRunning
"""
return self._running
@running.setter
def running(self, running):
"""
Sets the running of this V1ContainerState.
Details about a running container
:param running: The running of this V1ContainerState.
:type: V1ContainerStateRunning
"""
self._running = running
@property
def terminated(self):
"""
Gets the terminated of this V1ContainerState.
Details about a terminated container
:return: The terminated of this V1ContainerState.
:rtype: V1ContainerStateTerminated
"""
return self._terminated
@terminated.setter
def terminated(self, terminated):
"""
Sets the terminated of this V1ContainerState.
Details about a terminated container
:param terminated: The terminated of this V1ContainerState.
:type: V1ContainerStateTerminated
"""
self._terminated = terminated
@property
def waiting(self):
"""
Gets the waiting of this V1ContainerState.
Details about a waiting container
:return: The waiting of this V1ContainerState.
:rtype: V1ContainerStateWaiting
"""
return self._waiting
@waiting.setter
def waiting(self, waiting):
"""
Sets the waiting of this V1ContainerState.
Details about a waiting container
:param waiting: The waiting of this V1ContainerState.
:type: V1ContainerStateWaiting
"""
self._waiting = waiting
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
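# Example (sketch, not part of the generated model): a container that is waiting to start.
# V1ContainerStateWaiting comes from the same generated package.
# state = V1ContainerState(waiting=V1ContainerStateWaiting(reason="ContainerCreating"))
# state.to_dict()   # {'running': None, 'terminated': None, 'waiting': {...}}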
|
py | b4097b8deb8cdb36df5fb0f3077c8546c56fb256 | # -*- coding: utf-8 -*-
__description__ = "Python client for interfacing with ModelDB and the Verta platform"
__license__ = "Apache 2.0"
__maintainer__ = "Michael Liu"
__maintainer_email__ = "[email protected]"
__title__ = "verta"
__url__ = "https://www.verta.ai/"
__version__ = "0.15.6"
|
py | b4097c343602c810f4eaaccd6a952985e4593237 | from ._exodus import read, write
__all__ = ["read", "write"]
|
py | b4097c57876acfa19a66eb1799fd0e83bda10f10 | import json
import pytest
import requests
@pytest.fixture(name="api_key")
def fixture_api_key():
# if this is ever changed, then fixture `basic_auth_string` **must** be recomputed
return "0"
@pytest.fixture(name="api_key_secret")
def fixture_api_key_secret():
# if this is ever changed, then fixture `basic_auth_string` **must** be recomputed
return "1"
@pytest.fixture(name="basic_auth_string")
def fixture_basic_auth_string():
# this **must** be recomputed whenever any of the fixtures
# `api_key` and `api_key_secret` changes
return "MDox"
@pytest.fixture(name="bearer_token")
def fixture_bearer_token():
# doesn't mean much, was taken off of twitter's documentation
return "AAAA%2FAAA%3DAAAAAAAA"
@pytest.fixture(name="mock_post_bearer_token_endpoint")
def fixture_mock_post_bearer_token_endpoint(
requests_mock, basic_auth_string, bearer_token, forbidden_response
):
def matcher(req):
if req.path != "/oauth2/token":
return None
if req.headers.get("Authorization") != f"Basic {basic_auth_string}":
return forbidden_response
if (
req.headers.get("Content-Type")
!= "application/x-www-form-urlencoded;charset=UTF-8"
):
return forbidden_response
if req.json().get("grant_type") != "client_credentials":
return forbidden_response
resp = requests.Response()
resp._content = json.dumps(
{"token_type": "bearer", "access_token": f"{bearer_token}"}
).encode()
resp.status_code = 200
return resp
requests_mock._adapter.add_matcher(matcher)
yield
@pytest.fixture(name="forbidden_response")
def fixture_forbidden_response():
resp = requests.Response()
resp.status_code = 403
return resp
@pytest.fixture(name="mock_json_from_api_get_endpoint")
def fixture_mock_json_from_api_get_endpoint():
return {
"data": [
{"id": "123456789", "text": "Python 2 is dead, long live Python 3!"},
{"id": "543212345", "text": "Python rocks."},
{"id": "333666999", "text": "TIL python is not always a snake."},
]
}
@pytest.fixture(name="mock_api_get_endpoint")
def fixture_mock_api_get_endpoint(
requests_mock, bearer_token, mock_json_from_api_get_endpoint
):
requests_mock.get(
"https://api.twitter.com/2/tweets/search/recent?query=python&max_results=3",
request_headers={"Authorization": f"Bearer {bearer_token}"},
json=mock_json_from_api_get_endpoint,
)
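# Example (sketch): how these fixtures might be wired into a test. `search_recent_tweets` is a
# hypothetical client helper used only for illustration; it is not defined in this repository.
# def test_search_recent(mock_api_get_endpoint, bearer_token, mock_json_from_api_get_endpoint):
#     data = search_recent_tweets("python", bearer_token, max_results=3)
#     assert data == mock_json_from_api_get_endpoint["data"]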
|
py | b4097c81e92bdf1e2501e8ec2f97b4a946f8fd41 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
# VolvoOnCall plugin for indigo
#
# This plugin uses the python API published by Erik Eriksson
# https://github.com/molobrakos/volvooncall
#
# Based on sample code that is:
# Copyright (c) 2014, Perceptive Automation, LLC. All rights reserved.
# http://www.indigodomo.com
import indigo
import volvooncall
import time
import traceback  # used by the error handlers below but missing from the original imports
from urllib2 import HTTPError  # assumed source of HTTPError, which is caught below but never imported
################################################################################
class Plugin(indigo.PluginBase):
########################################
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
self.debug = pluginPrefs.get("showDebugInfo", True)
self.version = pluginVersion
self.vehicles = []
#self.debug = True
self.states = {}
self.strstates = {}
self.numstates = {}
self.boolstates = {}
self.resetStates = True
self.cmdStates = {}
self.cmdStates["set_valet_mode"] = ""
def closedPrefsConfigUi(self, valuesDict, userCancelled):
# Since the dialog closed we want to set the debug flag - if you don't directly use
# a plugin's properties (and for debugLog we don't) you'll want to translate it to
# the appropriate stuff here.
if not userCancelled:
self.debug = valuesDict.get("showDebugInfo", False)
if self.debug:
indigo.server.log("Debug logging enabled")
else:
indigo.server.log("Debug logging disabled")
def getDeviceStateList(self, dev): #Override state list
stateList = indigo.PluginBase.getDeviceStateList(self, dev)
if stateList is not None:
# for key in self.states.iterkeys():
# dynamicState1 = self.getDeviceStateDictForStringType(key, key, key)
# stateList.append(dynamicState1)
#self.debugLog(str(stateList))
for key in self.strstates.iterkeys():
if ((self.resetStates) and (key in stateList)):
stateList.remove(key)
dynamicState1 = self.getDeviceStateDictForStringType(key, key, key)
stateList.append(dynamicState1)
for key in self.numstates.iterkeys():
if ((self.resetStates) and (key in stateList)):
stateList.remove(key)
dynamicState1 = self.getDeviceStateDictForNumberType(key, key, key)
stateList.append(dynamicState1)
for key in self.boolstates.iterkeys():
if ((self.resetStates) and (key in stateList)):
stateList.remove(key)
dynamicState1 = self.getDeviceStateDictForBoolTrueFalseType(key, key, key)
stateList.append(dynamicState1)
return sorted(stateList)
def getVehicles(self):
if not self.vehicles:
indigo.server.log("Fetching vehicles...")
try:
                # session argument first, then username and password
                connection = volvooncall.Connection(None, self.pluginPrefs['username'], self.pluginPrefs['password'])
                # volvooncall's Connection does not expose a Tesla-style oauth dict, so log the account instead
                self.debugLog("Created volvooncall connection for user {}".format(self.pluginPrefs['username']))
except Exception as e:
self.errorLog(e)
self.errorLog("Error creating connection")
self.errorLog("Plugin version: {}".format(self.version))
self.debugLog(traceback.format_exc())
self.vehicles = dict((unicode(v['id']),v) for v in connection.vehicles)
indigo.server.log("%i vehicles found" % len(self.vehicles))
#self.debugLog(self.vehicles)
for v in self.vehicles:
self.debugLog(u"Vehicle %s: %s [%s]" % (v,self.vehicles[v]['display_name'],self.vehicles[v]['vin']))
return self.vehicles
# Generate list of cars
def carListGenerator(self, filter="", valuesDict=None, typeId="", targetId=0):
cars = [(k, "%s (%s)" % (v['display_name'], v['vin']))
for k,v in self.getVehicles().items()]
self.debugLog("carListGenerator: %s" % str(cars))
return cars
def closedDeviceConfigUi(self, valuesDict, userCancelled, typeId, devId):
self.debugLog("Device ID: %s" % devId)
vehicleId = valuesDict['car']
statusName="doRefresh"
self.vehicleStatus2(statusName,vehicleId,devId)
return True
### ACTIONS
def validateActionConfigUi(self, valuesDict, typeId, actionId):
if typeId=='set_charge_limit':
try:
percent = int(valuesDict['percent'])
if percent > 100 or percent < 50:
raise ValueError
valuesDict['percent'] = percent
except ValueError:
errorsDict = indigo.Dict()
errorsDict['percent'] = "A percentage between 50 and 100"
return (False, valuesDict, errorsDict)
return (True, valuesDict)
def vehicleCommand(self, action, dev):
vehicleId = dev.pluginProps['car']
commandName = action.pluginTypeId
indigo.server.log("Tesla command %s for vehicle %s" % (commandName, vehicleId))
try:
vehicle = self.getVehicles()[vehicleId]
except KeyError:
self.errorLog(u"Vehicle ID %s not recognised. Please edit your Tesla Vehicle device and re-select the appropriate car." % vehicleId)
self.debugLog(u"Indigo device '%s' holds vehicleId of %s but this no longer exists in the vehicle list held by Tesla." % (dev.name,vehicleId))
return
if commandName == "wake_up":
self.response = vehicle.wake_up()
self.debugLog(self.response)
return
data = action.props
#self.debugLog(data)
i = 0
validReasons = ["already on", "already off",""]
invalidReasons = ["cabin comfort remote settings not enabled"]
self.response = "Incomplete"
while True:
try:
self.response = vehicle.command(commandName, data)
#self.debugLog(self.response)
except HTTPError as h:
self.errorLog(h)
self.errorLog("Timeout issuing command: {} {}".format(commandName,str(data)))
self.errorLog("Plugin version: {}".format(self.version))
self.debugLog(traceback.format_exc())
except Exception as e:
self.errorLog(e)
self.errorLog("Error issuing command: {} {}".format(commandName,str(data)))
self.errorLog("Plugin version: {}".format(self.version))
self.debugLog(traceback.format_exc())
self.debugLog(self.response)
if (self.response == "Incomplete"):
break
if (self.response["response"]["reason"] in validReasons) or self.response["response"]["result"] == True:
indigo.server.log("Sent %s successfully. Refreshing appropriate states..." % commandName)
self.debugLog("Sent %s successfully. Refreshing appropriate states..." % commandName)
action.pluginTypeId = self.cmdStates[commandName]
self.vehicleStatus(action,dev)
break
if (self.response["response"]["reason"] in invalidReasons):
indigo.server.log("Command %s declined: %s" % (commandName,self.response["response"]["reason"]))
self.debugLog("Command %s declined: %s" % (commandName,self.response["response"]["reason"]))
break
if "vehicle unavailable" in self.response["response"]["error"] or "mothership" in self.response["response"]["error"]:
indigo.server.log("Command %s declined: Vehicle unavailable" % commandName)
self.debugLog("Command %s declined: Vehicle unavailable" % commandName)
indigo.server.log(u"Automatically sending wake_up command before retrying...")
self.debugLog(u"Automatically sending wake_up command before retrying...")
vehicle.wake_up() #Try waking it up
indigo.server.log(u"Waiting 30 seconds before retrying...")
                time.sleep(30) # give the vehicle time to wake before retrying (the loop sleeps another 10 seconds below)
else:
self.debugLog(u"Failed attempt %s/5 because: %s" % (i,self.response["response"]["reason"]))
if i > 3:
self.debugLog(u"Automatically sending wake_up command before retrying...")
vehicle.wake_up() #Try waking it up
self.debugLog(u"Waiting 30 seconds before retrying...")
time.sleep(20) #20 seconds here because loop waits 10 itself
else:
self.debugLog(u"Retrying in 10 seconds...")
if i >= 5:
self.debugLog(u"%s failed after 5 attempts." % commandName)
indigo.server.log(u"%s failed after 5 attempts." % commandName)
break
i= i+1
time.sleep(10)
def vehicleStatus(self, action, dev):
vehicleId = dev.pluginProps['car']
statusName = action.pluginTypeId
#self.debugLog(str(dev))
if (statusName == ""):
return
self.vehicleStatus2(statusName,vehicleId,dev.id)
def vehicleStatus2(self,statusName,vehicleId,devId):
indigo.server.log("Tesla request %s for vehicle %s: Initialising" % (statusName, vehicleId))
try:
vehicle = self.getVehicles()[vehicleId]
except KeyError:
self.errorLog(u"Vehicle ID %s not recognised. Please edit your Tesla Vehicle device and re-select the appropriate car." % vehicleId)
dev = indigo.devices[devId]
self.debugLog(u"Indigo device '%s' holds vehicleId of %s but this no longer exists in the vehicle list held by Tesla." % (dev.name,vehicleId))
return
dev = indigo.devices[devId]
#self.debugLog(statusName)
if (statusName == "doRefresh"):
action = "charge_state"
self.vehicleStatus2(action,vehicleId,devId)
action = "drive_state"
self.vehicleStatus2(action,vehicleId,devId)
action = "climate_state"
self.vehicleStatus2(action,vehicleId,devId)
action = "vehicle_state"
self.vehicleStatus2(action,vehicleId,devId)
action = "gui_settings"
self.vehicleStatus2(action,vehicleId,devId)
action = "vehicle_config"
self.vehicleStatus2(action,vehicleId,devId)
return
self.response = "Incomplete"
try:
self.response = vehicle.data_request(statusName)
except HTTPError as h:
self.errorLog(h)
self.errorLog("Timeout retrieving status: {}".format(statusName))
self.debugLog(traceback.format_exc())
except Exception as e:
self.errorLog(e)
self.errorLog("Timeout retrieving status: {}".format(statusName))
self.debugLog(traceback.format_exc())
self.debugLog(u"Response: %s" % str(self.response))
if (self.response == None):
self.errorLog("No reply...")
return
if (self.response == "Incomplete"):
self.errorLog("Incomplete...")
return
if 'response' in self.response:
if self.response['response'] == None:
#self.debugLog("We don't appear to have an answer")
if 'error' in self.response:
#self.debugLog("ERROR ALERT")
if "vehicle unavailable" in self.response["error"]:
indigo.server.log("Command %s declined: Vehicle unavailable" % statusName)
self.debugLog("Command %s declined: Vehicle unavailable" % statusName)
elif "mothership" in self.response["error"]:
indigo.server.log("Command %s declined: Mothership unavailable" % statusName)
self.debugLog("Command %s declined: Mothership unavailable" % statusName)
else:
self.debugLog(u"No motherships found")
return
indigo.server.log(u"Automatically sending wake_up command before retrying...")
self.debugLog(u"Automatically sending wake_up command before retrying...")
vehicle.wake_up() #Try waking it up
indigo.server.log(u"Waiting 30 seconds before retrying...")
time.sleep(30) #30 seconds
self.vehicleStatus2(statusName,vehicleId,devId)
return
else:
self.debugLog(u"No errors")
return
else:
indigo.server.log("Tesla request %s for vehicle %s: Data received" % (statusName, vehicleId))
for k,v in sorted(self.response['response'].items()):
#self.debugLog("State %s, value %s, type %s" % (k,v,type(v)))
self.states[k] = v
if (type(v) is dict):
#indigo.server.log(u"Skipping state %s: JSON Dict found" % (k))
#self.debugLog(v)
for innerv in v:
#self.debugLog("State %s, value %s, type %s" % (innerv,v[innerv],type(v[innerv])))
self.updateTheState("%s_%s" % (k,innerv),v[innerv],dev)
else:
self.updateTheState(k,v,dev)
if (self.resetStates):
indigo.server.log("Tesla request %s for vehicle %s: New states found - reinitialising" % (statusName, vehicleId))
dev.stateListOrDisplayStateIdChanged()
self.resetStates = False
self.vehicleStatus2(statusName,vehicleId,devId) #Re-do this request now the states are reset
return
indigo.server.log("Tesla request %s for vehicle %s: Completed" % (statusName, vehicleId))
#self.debugLog(str(dev.states))
if (statusName == "drive_state"):
self.latLongHome = dev.ownerProps.get("latLongHome","37.394838,-122.150389").split(",")
self.latLongWork = dev.ownerProps.get("latLongWork","37.331820,-122.03118").split(",")
fromHomeKm = self.getDistance(dev.states['latitude'],dev.states['longitude'],float(self.latLongHome[0]),float(self.latLongHome[1]))
fromWorkKm = self.getDistance(dev.states['latitude'],dev.states['longitude'],float(self.latLongWork[0]),float(self.latLongWork[1]))
            fromHomeM = fromHomeKm * 0.62137119223733 #convert km to miles
            fromWorkM = fromWorkKm * 0.62137119223733 #convert km to miles
dev.updateStateOnServer("distanceFromHomeKm",round(fromHomeKm,2), uiValue=str(round(fromHomeKm,2))+"km")
dev.updateStateOnServer("distanceFromWorkKm",round(fromWorkKm,2), uiValue=str(round(fromWorkKm,2))+"km")
dev.updateStateOnServer("distanceFromHomeM",round(fromHomeM,2), uiValue=str(round(fromHomeM,2))+"m")
dev.updateStateOnServer("distanceFromWorkM",round(fromWorkM,2), uiValue=str(round(fromWorkM,2))+"m")
def updateTheState(self,inKey,inValue,dev):
if (inKey in dev.states) and (self.resetStates == False):
#self.debugLog(str(type(v)))
dev.updateStateOnServer(inKey,inValue)
if (inKey == dev.ownerProps.get("stateToDisplay","")):
dev.updateStateOnServer("displayState",inValue)
else:
#self.debugLog("New states found - recreating state list...")
self.resetStates = True #We obviously need to reset states if we've got data for one that doesn't exist
if (inValue == None):
self.strstates[inKey] = inValue
elif (type(inValue) is float):
self.numstates[inKey] = inValue
elif (type(inValue) is int):
self.numstates[inKey] = inValue
elif (type(inValue) is bool):
self.boolstates[inKey] = inValue
elif (type(inValue) is str):
self.strstates[inKey] = inValue
elif (type(inValue) is unicode):
self.strstates[inKey] = inValue
else:
self.strstates[inKey] = inValue
def getDistance(self,atLat,atLong,fromLat,fromLong):
# approximate radius of earth in km
R = 6373.0
lat1 = radians(float(atLat)) #Where is vehicle at
lon1 = radians(float(atLong))
lat2 = radians(float(fromLat)) #Where are we testing from, eg Home
lon2 = radians(float(fromLong))
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
#self.debugLog(u"Result: %s" % distance)
#self.debugLog(u"Should be: 278.546 km")
return distance
def runConcurrentThread(self):
try:
while True:
if not self.vehicles:
self.getVehicles()
self.sleep(60) # in seconds
except self.StopThread:
# do any cleanup here
pass |
py | b4097cafa1487515c34337f8789a595f2caf6b77 | from matplotlib import pyplot as plt
import csv
import math
import numpy as np
import os
import torch
from torchvision import transforms, datasets
def plot_log(filename, show=True):
# load data
keys = []
values = []
with open(filename, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
if keys == []:
for key, value in row.items():
keys.append(key)
values.append(float(value))
continue
for _, value in row.items():
values.append(float(value))
values = np.reshape(values, newshape=(-1, len(keys)))
fig = plt.figure(figsize=(4,6))
fig.subplots_adjust(top=0.95, bottom=0.05, right=0.95)
fig.add_subplot(211)
epoch_axis = 0
for i, key in enumerate(keys):
if key == 'epoch':
epoch_axis = i
values[:, epoch_axis] += 1
break
for i, key in enumerate(keys):
if key.find('loss') >= 0: # loss
print(values[:, i])
plt.plot(values[:, epoch_axis], values[:, i], label=key)
plt.legend()
plt.title('Training loss')
fig.add_subplot(212)
for i, key in enumerate(keys):
if key.find('acc') >= 0: # acc
plt.plot(values[:, epoch_axis], values[:, i], label=key)
plt.legend()
plt.grid()
plt.title('Accuracy')
# fig.savefig('result/log.png')
if show:
plt.show()
def combine_images(generated_images):
num = generated_images.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num)/width))
shape = generated_images.shape[1:3]
image = np.zeros((height*shape[0], width*shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index/width)
j = index % width
image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
img[:, :, 0]
return image
def show_img(x_recon, x_real, save_dir):
data = np.array([[x_real[i], x_recon[i]] for i in range(len(x_real))])
data = np.reshape(data, (data.shape[0]*data.shape[1],)+data.shape[2:])
img = combine_images(np.transpose(data, [0, 2, 3, 1]))
image = (img * 255).astype(np.uint8)
figure = plt.figure()
plt.imshow(image, cmap='gray')
plt.title(' ')
plt.axis('off')
plt.text(7, -3, ' real \n inputs')
plt.text(30, -3, 'reconstructed \n inputs')
plt.show()
figure.savefig(os.path.join(save_dir, "real_and_recon.png"), format='png')
def load_mnist(path='./datasets', download=False, batch_size=100, shift_pixels=2):
"""
Construct dataloaders for training and test data. Data augmentation is also done here.
:param path: file path of the dataset
:param download: whether to download the original data
:param batch_size: batch size
:param shift_pixels: maximum number of pixels to shift in each direction
:return: train_loader, test_loader
"""
kwargs = {'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=True, download=download,
transform=transforms.Compose([transforms.RandomCrop(size=28, padding=shift_pixels),
transforms.ToTensor()])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=False, download=download,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True, **kwargs)
return train_loader, test_loader
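# --- Hedged usage sketch (not part of the original module) ---
# Shows how load_mnist and combine_images are typically wired together: pull one
# training batch, convert it to NHWC numpy, tile it into a grid and display it.
# The path './datasets', download=True and batch_size=16 are illustrative choices.
if __name__ == '__main__':
    train_loader, test_loader = load_mnist(path='./datasets', download=True, batch_size=16)
    x, _ = next(iter(train_loader))                        # x: (16, 1, 28, 28) float tensor
    grid = combine_images(x.permute(0, 2, 3, 1).numpy())   # combine_images expects NHWC arrays
    plt.imshow(grid, cmap='gray')
    plt.title('one MNIST training batch')
    plt.axis('off')
    plt.show()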
|
py | b4097fd2308810dc8d7fe8fcb50b3020b2c0a97f | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \á\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y, H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
py | b4098156269c966cce9219083fcc41d5f76fc0c3 | import asyncio
from tinkoff.invest import (
AsyncClient,
CandleInstrument,
MarketDataRequest,
SubscribeCandlesRequest,
SubscriptionAction,
SubscriptionInterval,
)
from tinkoff.invest.token import TOKEN
async def main():
async def request_iterator():
yield MarketDataRequest(
subscribe_candles_request=SubscribeCandlesRequest(
subscription_action=SubscriptionAction.SUBSCRIPTION_ACTION_SUBSCRIBE,
instruments=[
CandleInstrument(
figi="BBG004730N88",
interval=SubscriptionInterval.SUBSCRIPTION_INTERVAL_ONE_MINUTE,
)
],
)
)
while True:
await asyncio.sleep(1)
async with AsyncClient(TOKEN) as client:
async for marketdata in client.market_data_stream.market_data_stream(
request_iterator()
):
print(marketdata)
if __name__ == "__main__":
asyncio.run(main())
|
py | b40981a99cbe2b7a213337cab181c98435eeeb13 | # -*- encoding: utf-8
from decimal import Decimal
from unittest.mock import Mock
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql import pymssql
from sqlalchemy.dialects.mssql import pyodbc
from sqlalchemy.engine import url
from sqlalchemy.exc import DBAPIError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warnings
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;Trusted_Connection=Yes"], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc:///?dsn=mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;Trusted_Connection=Yes"], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://username:password@mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;UID=username;PWD=password"], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@mydsn/?LANGUAGE=us_"
"english&foo=bar"
)
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_hostname(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?driver=SQL+Server" # noqa
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_empty_url_no_warning(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://")
# no warning is emitted
dialect.create_connect_args(u)
def test_pyodbc_host_no_driver(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://username:password@hostspec/database")
def go():
return dialect.create_connect_args(u)
connection = assert_warnings(
go,
[
"No driver name specified; this is expected by "
"PyODBC when using DSN-less connections"
],
)
eq_(
[
[
"Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec:12345/data"
"base?driver=SQL Server"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec,12345;Database=datab"
"ase;UID=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?p"
"ort=12345&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password;port=12345"
],
{},
],
connection,
)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?L"
"ANGUAGE=us_english&foo=bar&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(
connection[0][0]
in (
"DRIVER={SQL Server};Server=hostspec;Database=database;"
"UID=username;PWD=password;foo=bar;LANGUAGE=us_english",
"DRIVER={SQL Server};Server=hostspec;Database=database;UID="
"username;PWD=password;LANGUAGE=us_english;foo=bar",
),
True,
)
def test_pyodbc_extra_connect_azure(self):
# issue #5592
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://@server_name/db_name?"
"driver=ODBC+Driver+17+for+SQL+Server&"
"authentication=ActiveDirectoryIntegrated"
)
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(
connection[0][0]
in (
"DRIVER={ODBC Driver 17 for SQL Server};"
"Server=server_name;Database=db_name;"
"Authentication=ActiveDirectoryIntegrated",
),
True,
)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc:///?odbc_connect=DRIVER%3D%7BSQL+Server"
"%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase"
"%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase"
"%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[["dsn=mydsn;Database=database;UID=username;PWD=password"], {}],
connection,
)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://userdiff:passdiff@localhost/dbdiff?od"
"bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer"
"%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse"
"rname%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_token_injection(self):
token1 = "someuser%3BPORT%3D50001"
token2 = "some{strange}pw%3BPORT%3D50001"
token3 = "somehost%3BPORT%3D50001"
token4 = "somedb%3BPORT%3D50001"
u = url.make_url(
"mssql+pyodbc://%s:%s@%s/%s?driver=foob"
% (token1, token2, token3, token4)
)
dialect = pyodbc.dialect()
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={foob};Server=somehost%3BPORT%3D50001;"
"Database=somedb%3BPORT%3D50001;UID={someuser;PORT=50001};"
"PWD={some{strange}}pw;PORT=50001}"
],
{},
],
connection,
)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = url.make_url("mssql+pymssql://scott:tiger@somehost/test")
connection = dialect.create_connect_args(u)
eq_(
[
[],
{
"host": "somehost",
"password": "tiger",
"user": "scott",
"database": "test",
},
],
connection,
)
u = url.make_url("mssql+pymssql://scott:tiger@somehost:5000/test")
connection = dialect.create_connect_args(u)
eq_(
[
[],
{
"host": "somehost:5000",
"password": "tiger",
"user": "scott",
"database": "test",
},
],
connection,
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003",
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
def test_pyodbc_disconnect(self):
dialect = pyodbc.dialect()
class MockDBAPIError(Exception):
pass
class MockProgrammingError(MockDBAPIError):
pass
dialect.dbapi = Mock(
Error=MockDBAPIError, ProgrammingError=MockProgrammingError
)
for error in [
MockDBAPIError(code, "[%s] some pyodbc message" % code)
for code in [
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
]
] + [
MockProgrammingError(message)
for message in [
"(some pyodbc stuff) The cursor's connection has been closed.",
"(some pyodbc stuff) Attempt to use a closed connection.",
]
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(
dialect.is_disconnect(
MockProgrammingError("Query with abc08007def failed"),
None,
None,
),
False,
)
@testing.requires.mssql_freetds
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(
exc.SAWarning, "Unrecognized server version info", engine.connect
)
class FastExecutemanyTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
__requires__ = ("pyodbc_fast_executemany",)
def test_flag_on(self, metadata):
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
t.create(testing.db)
eng = engines.testing_engine(options={"fast_executemany": True})
@event.listens_for(eng, "after_cursor_execute")
def after_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if executemany:
assert cursor.fast_executemany
with eng.begin() as conn:
conn.execute(
t.insert(),
[{"id": i, "data": "data_%d" % i} for i in range(100)],
)
conn.execute(t.insert(), {"id": 200, "data": "data_200"})
@testing.fixture
def fe_engine(self, testing_engine):
def go(use_fastexecutemany, apply_setinputsizes_flag):
engine = testing_engine(
options={
"fast_executemany": use_fastexecutemany,
"use_setinputsizes": apply_setinputsizes_flag,
}
)
return engine
return go
@testing.combinations(
(
"setinputsizeshook",
True,
),
(
"nosetinputsizeshook",
False,
),
argnames="include_setinputsizes",
id_="ia",
)
@testing.combinations(
(
"setinputsizesflag",
True,
),
(
"nosetinputsizesflag",
False,
),
argnames="apply_setinputsizes_flag",
id_="ia",
)
@testing.combinations(
(
"fastexecutemany",
True,
),
(
"nofastexecutemany",
False,
),
argnames="use_fastexecutemany",
id_="ia",
)
def test_insert_floats(
self,
metadata,
fe_engine,
include_setinputsizes,
use_fastexecutemany,
apply_setinputsizes_flag,
):
expect_failure = (
apply_setinputsizes_flag
and not include_setinputsizes
and use_fastexecutemany
)
engine = fe_engine(use_fastexecutemany, apply_setinputsizes_flag)
observations = Table(
"Observations",
metadata,
Column("id", Integer, nullable=False, primary_key=True),
Column("obs1", Numeric(19, 15), nullable=True),
Column("obs2", Numeric(19, 15), nullable=True),
schema="test_schema",
)
with engine.begin() as conn:
metadata.create_all(conn)
records = [
{
"id": 1,
"obs1": Decimal("60.1722066045792"),
"obs2": Decimal("24.929289808227466"),
},
{
"id": 2,
"obs1": Decimal("60.16325715615476"),
"obs2": Decimal("24.93886459535008"),
},
{
"id": 3,
"obs1": Decimal("60.16445165123469"),
"obs2": Decimal("24.949856300109516"),
},
]
if include_setinputsizes:
canary = mock.Mock()
@event.listens_for(engine, "do_setinputsizes")
def do_setinputsizes(
inputsizes, cursor, statement, parameters, context
):
canary(list(inputsizes.values()))
for key in inputsizes:
if isinstance(key.type, Numeric):
inputsizes[key] = (
engine.dialect.dbapi.SQL_DECIMAL,
19,
15,
)
with engine.begin() as conn:
if expect_failure:
with expect_raises(DBAPIError):
conn.execute(observations.insert(), records)
else:
conn.execute(observations.insert(), records)
eq_(
conn.execute(
select(observations).order_by(observations.c.id)
)
.mappings()
.all(),
records,
)
if include_setinputsizes:
if apply_setinputsizes_flag:
eq_(
canary.mock_calls,
[
# float for int? this seems wrong
mock.call([float, float, float]),
mock.call([]),
],
)
else:
eq_(canary.mock_calls, [])
class VersionDetectionTest(fixtures.TestBase):
@testing.fixture
def mock_conn_scalar(self):
return lambda text: Mock(
exec_driver_sql=Mock(
return_value=Mock(scalar=Mock(return_value=text))
)
)
def test_pymssql_version(self, mock_conn_scalar):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
]:
conn = mock_conn_scalar(vers)
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_productversion(self, mock_conn_scalar):
dialect = pyodbc.MSDialect_pyodbc()
conn = mock_conn_scalar("11.0.9216.62")
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_fallback(self):
dialect = pyodbc.MSDialect_pyodbc()
dialect.dbapi = Mock()
for vers, expected in [
("11.0.9216.62", (11, 0, 9216, 62)),
("notsqlserver.11.foo.0.9216.BAR.62", (11, 0, 9216, 62)),
("Not SQL Server Version 10.5", (5,)),
]:
conn = Mock(
exec_driver_sql=Mock(
return_value=Mock(
scalar=Mock(
side_effect=exc.DBAPIError("stmt", "params", None)
)
)
),
connection=Mock(getinfo=Mock(return_value=vers)),
)
eq_(dialect._get_server_version_info(conn), expected)
class RealIsolationLevelTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
def test_isolation_level(self, metadata):
Table("test", metadata, Column("id", Integer)).create(
testing.db, checkfirst=True
)
with testing.db.connect() as c:
default = testing.db.dialect.get_isolation_level(c.connection)
values = [
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SERIALIZABLE",
"SNAPSHOT",
]
for value in values:
with testing.db.connect() as c:
c.execution_options(isolation_level=value)
c.exec_driver_sql("SELECT TOP 10 * FROM test")
eq_(
testing.db.dialect.get_isolation_level(c.connection), value
)
with testing.db.connect() as c:
eq_(testing.db.dialect.get_isolation_level(c.connection), default)
class IsolationLevelDetectTest(fixtures.TestBase):
def _fixture(self, view):
class Error(Exception):
pass
dialect = pyodbc.MSDialect_pyodbc()
dialect.dbapi = Mock(Error=Error)
dialect.server_version_info = base.MS_2012_VERSION
result = []
def fail_on_exec(
stmt,
):
if view is not None and view in stmt:
result.append(("SERIALIZABLE",))
else:
raise Error("that didn't work")
connection = Mock(
cursor=Mock(
return_value=Mock(
execute=fail_on_exec, fetchone=lambda: result[0]
)
)
)
return dialect, connection
def test_dm_pdw_nodes(self):
dialect, connection = self._fixture("dm_pdw_nodes_exec_sessions")
eq_(dialect.get_isolation_level(connection), "SERIALIZABLE")
def test_exec_sessions(self):
dialect, connection = self._fixture("exec_sessions")
eq_(dialect.get_isolation_level(connection), "SERIALIZABLE")
def test_not_supported(self):
dialect, connection = self._fixture(None)
with expect_warnings("Could not fetch transaction isolation level"):
assert_raises_message(
NotImplementedError,
"Can't fetch isolation",
dialect.get_isolation_level,
connection,
)
class InvalidTransactionFalsePositiveTest(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"error_t",
metadata,
Column("error_code", String(50), primary_key=True),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.error_t.insert(),
[{"error_code": "01002"}],
)
def test_invalid_transaction_detection(self, connection):
# issue #5359
t = self.tables.error_t
# force duplicate PK error
assert_raises(
IntegrityError,
connection.execute,
t.insert(),
{"error_code": "01002"},
)
# this should not fail with
# "Can't reconnect until invalid transaction is rolled back."
result = connection.execute(t.select()).fetchall()
eq_(len(result), 1)
|
py | b40981ba1af5588fd1003d4470183d314b9a9932 | class Bird(object):
feather = True
class Chicken(Bird):
fly = False
def __init__(self, age):
self.age = age
def __getattr__(self, name):
if name == "adult":
if self.age > 1.0:
return True
else:
return False
else:
raise AttributeError(name)
summer = Chicken(2)
print(summer.male)  # raises AttributeError: __getattr__ only special-cases 'adult'
|
py | b409820b1cf0cf69fd157b6784245a61efe3c7c3 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for quant_utils."""
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core import quant_test_lib
from lingvo.core import quant_utils
class QuantizableLayerTest(quant_test_lib.QuantUtilsBaseTest):
def testOpWrapperArgChecking(self):
with self.session():
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.name = 'test'
l = p.Instantiate()
l.TrackQTensor('test')
fns = l.fns
# Just testing one dynamic and one const op.
# Dynamic.
fns.qadd(1, 1, qt='test')
fns.qadd(1, 1, qmin=-1.0, qmax=1.0)
with self.assertRaises(AssertionError):
fns.qadd(1, 1) # No range args.
with self.assertRaises(AssertionError):
fns.qadd(1, 1, qmin=-1.0) # Incomplete range args.
with self.assertRaises(AssertionError):
fns.qadd(1, 1, qmax=-1.0) # Incomplete range args.
with self.assertRaisesRegex(AssertionError, 'first calling TrackQTensor'):
fns.qadd(1, 1, qt='non_existing') # Test that qt is resolved.
# Const.
fns.qtanh(6.0) # No min/max.
fns.qtanh(6.0, qmin=-5.0, qmax=6.0) # Min/max
fns.qtanh(6.0, qt='test')
with self.assertRaisesRegex(AssertionError, 'first calling TrackQTensor'):
fns.qtanh(6.0, qt='non_existing') # Test that qt has precedence.
def testLayerWithNoQDomain(self):
with self.session():
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
self._testLayerHelper('testLayerWithNoQDomain', p,
self.NO_QDOMAIN_EXPECTED)
def testLayerWithIdentityQDomain(self):
with self.session():
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.QDomain.Params()
self._testLayerHelper('testLayerWithIdentityQDomain', p,
self.NO_QDOMAIN_EXPECTED)
def testLayerWithPassiveAsymQDomain(self):
# pyformat: disable
expected = [
[[ 0. , -0.03921568, -0.02352941, -0.00784314],
[ 0.0862745 , 0.13333333, -0.03137255, 0.06274509],
[ 0. , 0. , 0. , 0. ],
[-0.02352941, -0.17254901, -0.05490196, 0.02352941]],
[[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ],
[-0.02352941, -0.10196078, -0.00784314, 0.07058823],
[ 0.02352941, -0.1490196 , -0.09411764, 0.01568627]]]
# pyformat: enable
with self.session():
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
l = self._testLayerHelper(
'testLayerWithPassiveAsymQDomain', p, expected=expected)
init_minmax_vars = l.qdomain_default._qvars.Transform(lambda x: x.eval())
print('Initial Minmax vars:', init_minmax_vars)
# Record.
with py_utils.GlobalStepContext(16):
self.evaluate([l.PostTrainingStepUpdate()])
minmax_vars = l.qdomain_default._qvars.Transform(lambda x: x.eval())
print('Minmax vars:', minmax_vars)
# Make sure that the vars have moved from their defaults.
for k in minmax_vars:
self.assertNotEqual(init_minmax_vars[k], minmax_vars[k])
def testLayerWithPassiveAsymQDomainTrainQuantDisabledInital(self):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = -1
with self.session():
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainTrainQuantDisabledInital',
p,
expected=self.NO_QDOMAIN_EXPECTED)
def testLayerWithPassiveAsymQDomainTrainQuantDisabledStep16(self):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = -1
with self.session():
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainTrainQuantDisabledStep16',
p,
expected=self.NO_QDOMAIN_EXPECTED,
global_step=16)
def testLayerWithPassiveAsymQDomainEvalQuantDisabled(self):
with self.session(), self.SetEval(True):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = -1
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainEvalQuantDisabled',
p,
not_expected=self.NO_QDOMAIN_EXPECTED)
def testLayerWithPassiveAsymQDomainTrainQuantDelayNotSatisfied(self):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = 8
with self.session():
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainTrainQuantDelayNotSatisfied',
p,
expected=self.NO_QDOMAIN_EXPECTED,
global_step=3)
def testLayerWithPassiveAsymQDomainTrainQuantDelaySatisfied(self):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = 8
with self.session():
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainTrainQuantDelaySatisfied',
p,
not_expected=self.NO_QDOMAIN_EXPECTED,
global_step=8)
def testLayerWithPassiveAsymQDomainTrainQuantDelaySatisfiedPlusOne(self):
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
p.qdomain.default.delay_start_steps = 8
with self.session():
self._testLayerHelper(
'testLayerWithPassiveAsymQDomainTrainQuantDelaySatisfied',
p,
not_expected=self.NO_QDOMAIN_EXPECTED,
global_step=9)
def testLayerWithSymmetricScheduledClipQDomain(self):
# pyformat: disable
expected = [
[[ 0. , -0.0390625, -0.015625 , -0.0078125],
[ 0.0859375, 0.140625 , -0.0234375, 0.0625 ],
[ 0. , 0. , 0. , 0. ],
[-0.0234375, -0.171875 , -0.0546875, 0.0234375]],
[[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ],
[-0.0234375, -0.1015625, -0.015625 , 0.0703125],
[ 0. , -0.125 , -0.0625 , 0. ]]]
# pyformat: enable
with self.session():
p = quant_test_lib.SampleQuantizedProjectionLayer.Params()
p.qdomain.default = quant_utils.SymmetricScheduledClipQDomain.Params()
p.qdomain.default.cc_schedule.Set(
clip_start_step=0,
clip_end_step=5,
quant_start_step=10,
)
self._testLayerHelper(
'testLayerWithSymmetricScheduledClipQDomain',
p,
expected=expected,
global_step=16)
class ClippingCapScheduleTest:
def testLinearClippingCapSchedule(self):
p = quant_utils.LinearClippingCapSchedule.Params()
p.start_step = 50
p.end_step = 100
p.start_cap = 6.0
p.end_cap = 1.0
cc_schedule = p.Instantiate()
with self.session():
self.assertAllClose(cc_schedule._Value(25).eval(), 6.0)
self.assertAllClose(cc_schedule._Value(50).eval(), 6.0)
self.assertAllClose(cc_schedule._Value(60).eval(), 5.0)
self.assertAllClose(cc_schedule._Value(70).eval(), 4.0)
self.assertAllClose(cc_schedule._Value(80).eval(), 3.0)
self.assertAllClose(cc_schedule._Value(90).eval(), 2.0)
self.assertAllClose(cc_schedule._Value(100).eval(), 1.0)
self.assertAllClose(cc_schedule._Value(110).eval(), 1.0)
def _ClipExample(self, cc_schedule, v):
"""Returns a tuple of (neg, pos) for clipped neg/pos values of v."""
v = float(v)
clipped = (
cc_schedule.ApplyClipping(cc_schedule.theta, -v).eval(),
cc_schedule.ApplyClipping(cc_schedule.theta, v).eval(),
)
print('Clipped +-', v, ' ->', clipped)
return clipped
def testFakeQuantizationScheduleFromDefun(self):
p = quant_utils.FakeQuantizationSchedule.Params()
p.clip_start_step = 5
p.clip_end_step = 10
p.quant_start_step = 15
p.start_cap = 6.0
p.end_cap = 1.0
with self.session():
cc_schedule = p.Instantiate()
self.evaluate(tf.global_variables_initializer())
# Move to fully quantized part of schedule
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 16))
@tf.function(autograph=False)
def ExampleFunction8(x, cc_state):
return cc_schedule.ApplyClippingWithState(cc_state, x, bits=8)
@tf.function(autograph=False)
def ExampleFunction16(x, cc_state):
return cc_schedule.ApplyClippingWithState(cc_state, x, bits=16)
a = tf.constant(1.0)
b = tf.constant(0.5)
# 8bit value.
v = ExampleFunction8(a * b, cc_schedule.GetState(cc_schedule.theta))
self.assertAllClose(v.eval(), 0.5)
# 16bit value.
v = ExampleFunction16(a * b, cc_schedule.GetState(cc_schedule.theta))
self.assertAllClose(v.eval(), 0.5)
# An incomplete implementation requires special case gradient logic.
# This tests it, specifically in a Defun, which caused issues.
# 8bit gradient.
g = tf.gradients(
ExampleFunction8(a * b, cc_schedule.GetState(cc_schedule.theta)),
[a, b])
g = [t.eval() for t in g]
print('Gradient8:', g)
self.assertAllClose(g, (0.5, 1.0))
# 16bit gradient.
g = tf.gradients(
ExampleFunction16(a * b, cc_schedule.GetState(cc_schedule.theta)),
[a, b])
g = [t.eval() for t in g]
print('Gradient16:', g)
self.assertAllClose(g, (0.5, 1.0))
def testFakeQuantizationScheduleTraining(self):
p = quant_utils.FakeQuantizationSchedule.Params()
p.clip_start_step = 5
p.clip_end_step = 10
p.quant_start_step = 15
p.start_cap = 6.0
p.end_cap = 1.0
with self.session():
cc_schedule = p.Instantiate()
self.evaluate(tf.global_variables_initializer())
# Step 0: No clipping.
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0), (-100.0, 100.0))
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.123456, 0.123456)) # Not Quantized.
# Step 5: Clipping active but not yet quantizing.
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 5))
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0),
(-6.0, 5.953125)) # 6 * 127/128
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.123456, 0.123456)) # Not Quantized.
# Step 7: Middle of clipping range.
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 7))
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0), (-4.0, 3.96875)) # 4 * 127/128
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.123456, 0.123456)) # Not Quantized.
# Step 10: End of clipping range.
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 10))
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0),
(-1.0, 0.9921875)) # 1 * 127/128
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.123456, 0.123456)) # Not Quantized.
# Step 11: No more clipping but not yet quantizing.
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 11))
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0),
(-1.0, 0.9921875)) # 1 * 127/128
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.123456, 0.123456)) # Not Quantized.
# Step 15-16: Quantizing at full clip.
for step in (15, 16):
self.evaluate(tf.assign(py_utils.GetOrCreateGlobalStepVar(), step))
self.assertAllClose(
self._ClipExample(cc_schedule, 100.0),
(-1.0, 0.9921875)) # 1 * 127/128
self.assertAllClose(
self._ClipExample(cc_schedule, 0.123456),
(-0.125, 0.125)) # Quantized.
if __name__ == '__main__':
tf.test.main()
|
py | b4098237879137998f47ae7f6b37104f630df238 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class MadbSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class MadbDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
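# --- Hedged configuration sketch (not from the original file) ---
# Scrapy only runs these middlewares once they are enabled in the project's
# settings.py; the dotted path 'madb.middlewares' is an assumption based on the
# class names above, and 543 is simply the template's default ordering value.
#
# SPIDER_MIDDLEWARES = {
#     'madb.middlewares.MadbSpiderMiddleware': 543,
# }
# DOWNLOADER_MIDDLEWARES = {
#     'madb.middlewares.MadbDownloaderMiddleware': 543,
# }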
|
py | b40982b495157dab0d11669c9d34f18dbe1674c8 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
class PreLayer:
def __init__(self, out_shape):
self.out_shape = out_shape
def test_MeanPooling():
from npdl.layers import MeanPooling
pool = MeanPooling((2, 2))
pool.connect_to(PreLayer((10, 1, 20, 30)))
assert pool.out_shape == (10, 1, 10, 15)
with pytest.raises(ValueError):
pool.forward(np.random.rand(10, 10))
with pytest.raises(ValueError):
pool.backward(np.random.rand(10, 20))
assert np.ndim(pool.forward(np.random.rand(10, 20, 30))) == 3
assert np.ndim(pool.backward(np.random.rand(10, 20, 30))) == 3
assert np.ndim(pool.forward(np.random.rand(10, 1, 20, 30))) == 4
assert np.ndim(pool.backward(np.random.rand(10, 1, 20, 30))) == 4
def test_MaxPooling():
from npdl.layers import MaxPooling
pool = MaxPooling((2, 2))
pool.connect_to(PreLayer((10, 1, 20, 30)))
assert pool.out_shape == (10, 1, 10, 15)
with pytest.raises(ValueError):
pool.forward(np.random.rand(10, 10))
with pytest.raises(ValueError):
pool.backward(np.random.rand(10, 20))
assert np.ndim(pool.forward(np.random.rand(10, 20, 30))) == 3
assert np.ndim(pool.backward(np.random.rand(10, 20, 30))) == 3
assert np.ndim(pool.forward(np.random.rand(10, 1, 20, 30))) == 4
assert np.ndim(pool.backward(np.random.rand(10, 1, 20, 30))) == 4
|
py | b40982be1a31d6218dd80011ec4621f74203ac3b | from panda3d.core import Vec4 as PandaVec4
class Vec4(PandaVec4):
def __round__(self, decimals=4):
return Vec4(*(round(e,decimals) for e in self))
def __repr__(self):
return f'Vec4({self[0]}, {self[1]}, {self[2]}, {self[3]})'
def __iadd__(self, value):
if len(value) % 3 == 0:
for i in range(0, len(value), 3):
self.add_x(value[i])
self.add_y(value[i+1])
self.add_z(value[i+2])
return self
if len(value) % 2 == 0:
for i in range(0, len(value), 2):
self.add_x(value[i])
self.add_y(value[i+1])
return self
def __add__(self, value):
if len(value) == 4:
return Vec4(self[0]+value[0], self[1]+value[1], self[2]+value[2], self[3]+value[3])
if len(value) == 3:
return Vec4(self[0]+value[0], self[1]+value[1], self[2]+value[2], self[3])
elif len(value) == 2:
return Vec4(self[0]+value[0], self[1]+value[1], self[2], self[3])
def __mul__(self, value):
if isinstance(value, (int, float, complex)):
return Vec4(*(e*value for e in self))
return Vec4(self[0]*value[0], self[1]*value[1], self[2]*value[2], self[3]*value[3])
__rmul__ = __mul__
def __truediv__(self, value):
if isinstance(value, (int, float, complex)):
return Vec4(*(e/value for e in self))
return Vec4(self[0]/value[0], self[1]/value[1], self[2]/value[2], self[3]/value[3])
if __name__ == '__main__':
a = Vec4(1,0,0,0) * 2
a = Vec4(1,0,1,1) * Vec4(2,1,2,3)
b = Vec4(1.252352324,0,1,.2)
b += Vec4(0,1)
# test
print(a)
print(round(b))
print('-----------', a * 2)
print('-----------', 2 * a)
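    # Hedged extra checks (not in the original demo): exercise __truediv__ with a
    # scalar and element-wise with another Vec4.
    print('-----------', a / 2)
    print('-----------', a / Vec4(2, 1, 2, 3))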
|
py | b40982fa9f2deae3c6e2217d6278a821e5fbc181 | #!/usr/bin/env python3
"""Script to fill up EON with fake data"""
import os
from selfdrive.loggerd.config import ROOT, get_available_percent
from selfdrive.loggerd.tests.loggerd_tests_common import create_random_file
if __name__ == "__main__":
segment_idx = 0
while True:
seg_name = "1970-01-01--00-00-00--%d" % segment_idx
seg_path = os.path.join(ROOT, seg_name)
print(seg_path)
create_random_file(os.path.join(seg_path, 'fcamera.hevc'), 36)
create_random_file(os.path.join(seg_path, 'rlog.bz2'), 2)
segment_idx += 1
# Fill up to 99 percent
available_percent = get_available_percent()
if available_percent < 1.0:
break
|
py | b40983fee533b399c5ce7bd7952e2cced8b91a1b | # Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for :mod:`pennylane.operation`.
"""
import pytest
from collections import OrderedDict
from pennylane.wires import Wires
import pennylane.numpy as np
import pennylane as qml
from pennylane.transforms.commutation_dag import simplify
class TestSimplifyRotation:
"""Commutation function tests."""
def test_simplify_rot(self):
"""Simplify rot operations with different parameters."""
rot_x = qml.Rot(np.pi / 2, 0.1, -np.pi / 2, wires=0)
simplify_rot_x = simplify(rot_x)
assert simplify_rot_x.name == "RX"
assert simplify_rot_x.data == [0.1]
assert np.allclose(simplify_rot_x.get_matrix(), rot_x.get_matrix())
rot_y = qml.Rot(0, 0.1, 0, wires=0)
simplify_rot_y = simplify(rot_y)
assert simplify_rot_y.name == "RY"
assert simplify_rot_y.data == [0.1]
assert np.allclose(simplify_rot_y.get_matrix(), rot_y.get_matrix())
rot_z = qml.Rot(0.1, 0, 0.2, wires=0)
simplify_rot_z = simplify(rot_z)
assert simplify_rot_z.name == "RZ"
assert np.allclose(simplify_rot_z.data, [0.3])
assert np.allclose(simplify_rot_z.get_matrix(), rot_z.get_matrix())
rot_h = qml.Rot(np.pi, np.pi / 2, 0, wires=0)
simplify_rot_h = simplify(rot_h)
assert simplify_rot_h.name == "Hadamard"
assert np.allclose(simplify_rot_h.get_matrix(), 1.0j * rot_h.get_matrix())
rot = qml.Rot(0.1, 0.2, 0.3, wires=0)
not_simplified_rot = simplify(rot)
assert not_simplified_rot.name == "Rot"
assert np.allclose(not_simplified_rot.get_matrix(), rot.get_matrix())
def test_simplify_crot(self):
"""Simplify CRot operations with different parameters."""
crot_x = qml.CRot(np.pi / 2, 0.1, -np.pi / 2, wires=[0, 1])
simplify_crot_x = simplify(crot_x)
assert simplify_crot_x.name == "CRX"
assert simplify_crot_x.data == [0.1]
assert np.allclose(simplify_crot_x.get_matrix(), crot_x.get_matrix())
crot_y = qml.CRot(0, 0.1, 0, wires=[0, 1])
simplify_crot_y = simplify(crot_y)
assert simplify_crot_y.name == "CRY"
assert simplify_crot_y.data == [0.1]
assert np.allclose(simplify_crot_y.get_matrix(), crot_y.get_matrix())
crot_z = qml.CRot(0.1, 0, 0.2, wires=[0, 1])
simplify_crot_z = simplify(crot_z)
assert simplify_crot_z.name == "CRZ"
assert np.allclose(simplify_crot_z.data, [0.3])
assert np.allclose(simplify_crot_z.get_matrix(), crot_z.get_matrix())
crot = qml.CRot(0.1, 0.2, 0.3, wires=[0, 1])
not_simplified_crot = simplify(crot)
assert not_simplified_crot.name == "CRot"
assert np.allclose(not_simplified_crot.get_matrix(), crot.get_matrix())
def test_simplify_u2(self):
"""Simplify u2 operations with different parameters."""
u2_x = qml.U2(-np.pi / 2, np.pi / 2, wires=0)
simplify_u2_x = simplify(u2_x)
assert simplify_u2_x.name == "RX"
assert simplify_u2_x.data == [np.pi / 2]
assert np.allclose(simplify_u2_x.get_matrix(), u2_x.get_matrix())
u2_y = qml.U2(-2 * np.pi, 2 * np.pi, wires=0)
simplify_u2_y = simplify(u2_y)
assert simplify_u2_y.name == "RY"
assert simplify_u2_y.data == [np.pi / 2]
assert np.allclose(simplify_u2_y.get_matrix(), u2_y.get_matrix())
u2 = qml.U2(0.1, 0.2, wires=0)
u2_not_simplified = simplify(u2)
assert u2_not_simplified.name == "U2"
assert u2_not_simplified.data == [0.1, 0.2]
assert np.allclose(u2_not_simplified.get_matrix(), u2.get_matrix())
def test_simplify_u3(self):
"""Simplify u3 operations with different parameters."""
u3_x = qml.U3(0.1, -np.pi / 2, np.pi / 2, wires=0)
simplify_u3_x = simplify(u3_x)
assert simplify_u3_x.name == "RX"
assert simplify_u3_x.data == [0.1]
assert np.allclose(simplify_u3_x.get_matrix(), u3_x.get_matrix())
u3_y = qml.U3(0.1, 0.0, 0.0, wires=0)
simplify_u3_y = simplify(u3_y)
assert simplify_u3_y.name == "RY"
assert simplify_u3_y.data == [0.1]
assert np.allclose(simplify_u3_y.get_matrix(), u3_y.get_matrix())
u3_z = qml.U3(0.0, 0.1, 0.0, wires=0)
simplify_u3_z = simplify(u3_z)
assert simplify_u3_z.name == "PhaseShift"
assert simplify_u3_z.data == [0.1]
assert np.allclose(simplify_u3_z.get_matrix(), u3_z.get_matrix())
u3 = qml.U3(0.1, 0.2, 0.3, wires=0)
u3_not_simplified = simplify(u3)
assert u3_not_simplified.name == "U3"
assert u3_not_simplified.data == [0.1, 0.2, 0.3]
assert np.allclose(u3_not_simplified.get_matrix(), u3.get_matrix())
def test_simplify_not_rotations(self):
"""Test that the simplify function returns a warning when giving a non rotation operation as argument."""
id = qml.Identity(wires=0)
with pytest.raises(
qml.QuantumFunctionError, match="Identity is not a Rot, U2, U3 or CRot."
):
simplify(id)
class TestCommutingFunction:
"""Commutation function tests."""
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1, 0]], False),
([[1, 0], [1, 0]], True),
([[0, 1], [2, 3]], True),
([[0, 1], [3, 1]], True),
],
)
def test_cnot(self, wires, res):
"""Commutation between two CNOTs."""
commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.CNOT(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[1, 2], [1, 0, 2]], True),
([[1, 2], [0, 1, 2]], True),
([[3, 2], [0, 1, 2]], True),
([[0, 1], [0, 1, 2]], False),
],
)
def test_cnot_toffoli(self, wires, res):
"""Commutation between CNOT and Toffoli"""
commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.Toffoli(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[1, 2], [1, 0]], True),
([[0, 1], [0, 1]], False),
([[0, 1], [2, 0]], True),
([[0, 1], [0, 2]], True),
],
)
def test_cnot_cz(self, wires, res):
"""Commutation between CNOT and CZ"""
commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.CZ(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1, 2]], True),
([[0, 2], [0, 1, 2]], True),
([[0, 2], [0, 2, 1]], True),
],
)
def test_cz_mcz(self, wires, res):
"""Commutation between CZ and MCZ."""
def z():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.CZ(wires=wires[0]), qml.transforms.ctrl(z, control=wires[1][:-1])()
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1, 2]], True),
([[0, 2], [0, 1, 2]], True),
([[0, 2], [0, 2, 1]], True),
],
)
def test_mcz_cz(self, wires, res):
"""Commutation between MCZ and CZ"""
def z():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.transforms.ctrl(z, control=wires[1][:-1])(), qml.CZ(wires=wires[0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0, 1, 2]], False),
([[1], [0, 1, 2]], False),
([[2], [0, 1, 2]], False),
],
)
def test_rx_mcz(self, wires, res):
"""Commutation between RX and MCZ"""
def z():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.RX(0.1, wires=wires[0][0]), qml.transforms.ctrl(z, control=wires[1][:-1])()
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0, 1, 2]], True),
([[1], [0, 1, 2]], True),
([[2], [0, 1, 2]], True),
],
)
    def test_mcz_rz(self, wires, res):
"""Commutation between MCZ and RZ"""
def z():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.transforms.ctrl(z, control=wires[1][:-1])(), qml.RZ(0.1, wires=wires[0][0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1, 2], [0, 1, 2]], True),
([[0, 2, 1], [0, 1, 2]], True),
([[1, 2, 0], [0, 2, 1]], True),
],
)
def test_mcz_mcz(self, wires, res):
"""Commutation between MCZ and MCZ."""
def z_1():
qml.PauliZ(wires=wires[0][2])
def z_2():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.transforms.ctrl(z_1, control=wires[0][:-1])(),
qml.transforms.ctrl(z_2, control=wires[1][:-1])(),
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1, 2]], False),
([[0, 2], [0, 1, 2]], False),
([[0, 2], [0, 2, 1]], False),
([[0, 3], [0, 2, 1]], True),
([[0, 3], [1, 2, 0]], True),
],
)
def test_cnot_mcz(self, wires, res):
"""Commutation between CNOT and MCZ."""
def z():
qml.PauliZ(wires=wires[1][2])
commutation = qml.is_commuting(
qml.CNOT(wires=wires[0]), qml.transforms.ctrl(z, control=wires[1][:-1])()
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[1], [0, 1]], True),
([[0], [0, 1]], False),
([[2], [0, 1]], True),
],
)
def test_x_cnot(self, wires, res):
"""Commutation between PauliX and CNOT."""
commutation = qml.is_commuting(qml.PauliX(wires=wires[0]), qml.CNOT(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[1], [0, 1]], True),
([[0], [0, 1]], False),
([[2], [0, 1]], True),
],
)
def test_cnot_x(self, wires, res):
"""Commutation between CNOT and PauliX."""
commutation = qml.is_commuting(qml.CNOT(wires=wires[1]), qml.PauliX(wires=wires[0]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[1], [0, 1]], False),
([[0], [0, 1]], False),
([[2], [0, 1]], True),
],
)
def test_x_cy(self, wires, res):
"""Commutation between PauliX and CY."""
commutation = qml.is_commuting(qml.PauliX(wires=wires[0]), qml.CY(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 2], [0, 1, 2]], False),
([[0, 1], [0, 1, 2]], False),
([[0, 3], [0, 1, 2]], True),
([[1, 2], [0, 1, 2]], False),
],
)
def test_cnot_cswap(self, wires, res):
"""Commutation between CNOT and CSWAP."""
commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.CSWAP(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1, 2], [1, 2]], False),
],
)
def test_cswap_cnot(self, wires, res):
"""Commutation between CSWAP and CNOT."""
commutation = qml.is_commuting(qml.CSWAP(wires=wires[0]), qml.CNOT(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1, 2], [2, 1, 0]], False),
],
)
def test_cswap_cswap(self, wires, res):
"""Commutation between CSWAP and CSWAP."""
commutation = qml.is_commuting(qml.CSWAP(wires=wires[0]), qml.CSWAP(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1]], False),
],
)
def test_cnot_swap(self, wires, res):
"""Commutation between CNOT and SWAP."""
commutation = qml.is_commuting(qml.CNOT(wires=wires[0]), qml.SWAP(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1]], False),
],
)
def test_swap_cnot(self, wires, res):
"""Commutation between SWAP and CNOT."""
commutation = qml.is_commuting(qml.SWAP(wires=wires[0]), qml.CNOT(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 2], [0, 1, 2]], False),
([[0, 1], [0, 1, 2]], False),
([[0, 3], [0, 1, 2]], True),
],
)
def test_cz_cswap(self, wires, res):
"""Commutation between CZ and CSWAP."""
commutation = qml.is_commuting(qml.CZ(wires=wires[0]), qml.CSWAP(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 2], [0, 1, 2, 3]], False),
([[0, 1], [0, 1, 2, 3]], False),
([[0, 3], [0, 1, 2, 3]], True),
],
)
def test_cnot_multicx(self, wires, res):
"""Commutation between CNOT and MultiControlledX."""
commutation = qml.is_commuting(
qml.CNOT(wires=wires[0]),
qml.MultiControlledX(
control_wires=wires[1][0:3], wires=wires[1][-1], control_values="111"
),
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_cphase_z(self, wires, res):
"""Commutation between CPhase and PauliZ."""
commutation = qml.is_commuting(qml.CPhase(0.2, wires=wires[0]), qml.PauliZ(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_cphase_phase(self, wires, res):
"""Commutation between CPhase and Phase."""
commutation = qml.is_commuting(
qml.CPhase(0.2, wires=wires[0]), qml.PhaseShift(0.1, wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], False),
([[0, 1], [1]], False),
],
)
def test_cphase_paulix(self, wires, res):
"""Commutation between CPhase and PauliX."""
commutation = qml.is_commuting(qml.CPhase(0.2, wires=wires[0]), qml.PauliX(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_cphase_zero_paulix(self, wires, res):
"""Commutation between CPhase(0.0) and PauliX."""
commutation = qml.is_commuting(qml.CPhase(0.0, wires=wires[0]), qml.PauliX(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], False),
],
)
def test_crx_pauliz(self, wires, res):
"""Commutation between CRX(0.1) and PauliZ."""
commutation = qml.is_commuting(qml.CRX(0.1, wires=wires[0]), qml.PauliZ(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_crx_zero_pauliz(self, wires, res):
"""Commutation between CRX(0.0) and PauliZ."""
commutation = qml.is_commuting(qml.CRX(0.0, wires=wires[0]), qml.PauliZ(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], False),
([[0, 1], [1]], False),
],
)
def test_crz_paulix(self, wires, res):
"""Commutation between CRZ(0.1) and PauliX."""
commutation = qml.is_commuting(qml.CRZ(0.1, wires=wires[0]), qml.PauliX(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_crz_zero_paulix(self, wires, res):
"""Commutation between CRZ(0.0) and PauliX."""
commutation = qml.is_commuting(qml.CRZ(0.0, wires=wires[0]), qml.PauliX(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], False),
([[0, 1], [1]], False),
],
)
def test_cry_hadamard(self, wires, res):
"""Commutation between CRY(0.1) and Hadamard."""
commutation = qml.is_commuting(qml.CRY(0.1, wires=wires[0]), qml.Hadamard(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], True),
([[0, 1], [1]], True),
],
)
def test_cry_zero_hadamard(self, wires, res):
"""Commutation between CRY(0.0) and Hadamard."""
commutation = qml.is_commuting(qml.CRY(0.0, wires=wires[0]), qml.Hadamard(wires=wires[1]))
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_rot_x_simplified(self, wires, res):
"""Commutation between Rot(np.pi / 2, 0.1, -np.pi / 2) and PauliX."""
commutation = qml.is_commuting(
qml.Rot(np.pi / 2, 0.1, -np.pi / 2, wires=wires[0]), qml.PauliX(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_rot_y_simplified(self, wires, res):
"""Commutation between Rot(0, 0.1, 0) and PauliY."""
commutation = qml.is_commuting(
qml.Rot(0, 0.1, 0, wires=wires[0]), qml.PauliY(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_rot_z_simplified(self, wires, res):
"""Commutation between Rot(0.1, 0.0, 0.2) and PauliZ."""
commutation = qml.is_commuting(
qml.Rot(0.1, 0, 0.2, wires=wires[0]), qml.PauliZ(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_rot_hadamard_simplified(self, wires, res):
"""Commutation between Rot(np.pi, np.pi / 2, 0) and Hadamard."""
commutation = qml.is_commuting(
qml.Rot(np.pi, np.pi / 2, 0, wires=wires[0]), qml.Hadamard(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], False),
([[0], [1]], True),
],
)
def test_rot_z(self, wires, res):
"""Commutation between Rot(0.1, 0.2, 0.3) and PauliZ."""
commutation = qml.is_commuting(
qml.Rot(0.1, 0.2, 0.3, wires=wires[0]), qml.PauliZ(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1]], True),
([[0, 1], [0]], False),
],
)
def test_crot_x_simplified(self, wires, res):
"""Commutation between CRot(np.pi / 2, 0.1, -np.pi / 2) and PauliX."""
commutation = qml.is_commuting(
qml.CRot(np.pi / 2, 0.1, -np.pi / 2, wires=wires[0]), qml.PauliX(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1]], True),
([[0, 1], [0]], False),
],
)
def test_crot_y_simplified(self, wires, res):
"""Commutation between CRot(0, 0.1, 0) and PauliY."""
commutation = qml.is_commuting(
qml.CRot(0, 0.1, 0, wires=wires[0]), qml.PauliY(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1]], True),
([[0, 1], [0]], True),
],
)
def test_crot_z_simplified(self, wires, res):
"""Commutation between CRot(0.1, 0, 0.2) and PauliZ."""
commutation = qml.is_commuting(
qml.CRot(0.1, 0, 0.2, wires=wires[0]), qml.PauliZ(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1]], True),
([[0, 1], [0]], False),
],
)
def test_crot_hadamard_simplified(self, wires, res):
"""Commutation between CRot(np.pi, np.pi / 2, 0) and Hadamard."""
commutation = qml.is_commuting(
qml.CRot(np.pi, np.pi / 2, 0, wires=wires[0]), qml.Hadamard(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [1]], False),
([[0, 1], [0]], True),
],
)
def test_crot_z(self, wires, res):
"""Commutation between CRot(0.1, 0.2, 0.3) and PauliZ."""
commutation = qml.is_commuting(
qml.CRot(0.1, 0.2, 0.3, wires=wires[0]), qml.PauliZ(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_u2_y_simplified(self, wires, res):
"""Commutation between U2(2*np.pi, -2*np.pi) and PauliY."""
commutation = qml.is_commuting(
qml.U2(2 * np.pi, -2 * np.pi, wires=wires[0]), qml.PauliY(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_u2_x_simplified(self, wires, res):
"""Commutation between U2(np.pi/2, -np.pi/2) and PauliX."""
commutation = qml.is_commuting(
qml.U2(np.pi / 2, -np.pi / 2, wires=wires[0]), qml.PauliX(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], False),
([[0], [1]], True),
],
)
def test_u2_u2(self, wires, res):
"""Commutation between U2(0.1, 0.2) and U2(0.3, 0.1)."""
commutation = qml.is_commuting(
qml.U2(0.1, 0.2, wires=wires[0]), qml.U2(0.3, 0.1, wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], False),
([[0, 1], [1]], False),
],
)
def test_crot_u2(self, wires, res):
"""Commutation between CRot(0.1, 0.2, 0.3) and U2(0.4, 0.5)."""
commutation = qml.is_commuting(
qml.CRot(0.1, 0.2, 0.3, wires=wires[0]), qml.U2(0.4, 0.5, wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0]], False),
([[0, 1], [1]], False),
],
)
def test_u2_crot(self, wires, res):
"""Commutation between U2(0.1, 0.2) and CRot(0.3, 0.4, 0.5)."""
commutation = qml.is_commuting(
qml.U2(0.1, 0.2, wires=wires[1]), qml.CRot(0.3, 0.4, 0.5, wires=wires[0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0, 1], [0, 1]], False),
([[0, 1], [1, 0]], False),
([[0, 2], [0, 1]], True),
([[0, 2], [1, 2]], False),
],
)
def test_crot_crot(self, wires, res):
"""Commutation between CRot(0.1, 0.2, 0.3) and CRot(0.3, 0.4, 0.5)."""
commutation = qml.is_commuting(
qml.CRot(0.1, 0.2, 0.3, wires=wires[1]), qml.CRot(0.3, 0.4, 0.5, wires=wires[0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_u3_simplified_z(self, wires, res):
"""Commutation between U3(0.0, 0.1, 0.0) and PauliZ."""
commutation = qml.is_commuting(
qml.U3(0.0, 0.1, 0.0, wires=wires[1]), qml.PauliZ(wires=wires[0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_u3_simplified_y(self, wires, res):
"""Commutation between U3(0.1, 0.0, 0.0) and PauliY."""
commutation = qml.is_commuting(
qml.U3(0.1, 0.0, 0.0, wires=wires[1]), qml.PauliY(wires=wires[0])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], True),
([[0], [1]], True),
],
)
def test_u3_simplified_x(self, wires, res):
"""Commutation between U3(0.1, -np.pi/2, np.pi/2) and PauliX."""
commutation = qml.is_commuting(
qml.U3(0.1, -np.pi / 2, np.pi / 2, wires=wires[0]), qml.PauliX(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], False),
([[0], [1]], True),
],
)
def test_u3_rot(self, wires, res):
"""Commutation between U3(0.1, 0.2, 0.3) and Rot(0.3, 0.2, 0.1)."""
commutation = qml.is_commuting(
qml.U3(0.1, 0.2, 0.3, wires=wires[0]), qml.Rot(0.3, 0.2, 0.1, wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], False),
([[0], [1]], True),
],
)
def test_u3_identity_barrier(self, wires, res):
"""Commutation between U3(0.0, 0.0, 0.0) and Barrier."""
commutation = qml.is_commuting(
qml.U3(0.0, 0.0, 0.0, wires=wires[0]), qml.Barrier(wires=wires[1])
)
assert commutation == res
@pytest.mark.parametrize(
"wires,res",
[
([[0], [0]], False),
([[0], [1]], True),
],
)
    def test_barrier_u3_identity(self, wires, res):
"""Commutation between Barrier and U3(0.0, 0.0, 0.0)."""
commutation = qml.is_commuting(
qml.Barrier(wires=wires[1]), qml.U3(0.0, 0.0, 0.0, wires=wires[0])
)
assert commutation == res
def test_operation_1_not_supported(self):
"""Test that giving a non supported operation raises an error."""
rho = np.zeros((2**1, 2**1), dtype=np.complex128)
rho[0, 0] = 1
with pytest.raises(
qml.QuantumFunctionError, match="Operation QubitDensityMatrix not supported."
):
qml.is_commuting(qml.QubitDensityMatrix(rho, wires=[0]), qml.PauliX(wires=0))
def test_operation_2_not_supported(self):
"""Test that giving a non supported operation raises an error."""
with pytest.raises(qml.QuantumFunctionError, match="Operation PauliRot not supported."):
qml.is_commuting(qml.PauliX(wires=0), qml.PauliRot(1, "X", wires=0))
def test_operation_1_multiple_targets(self):
"""Test that giving a multiple target controlled operation raises an error."""
def op():
qml.PauliZ(wires=2)
qml.PauliY(wires=2)
with pytest.raises(
qml.QuantumFunctionError, match="MultipleTargets controlled is not supported."
):
qml.is_commuting(qml.transforms.ctrl(op, control=[0, 1])(), qml.PauliX(wires=0))
def test_operation_2_multiple_targets(self):
"""Test that giving a multiple target controlled operation raises an error."""
def op():
qml.PauliZ(wires=2)
qml.PauliY(wires=2)
with pytest.raises(
qml.QuantumFunctionError, match="MultipleTargets controlled is not supported."
):
qml.is_commuting(qml.PauliX(wires=0), qml.transforms.ctrl(op, control=[0, 1])())
def test_non_commuting(self):
"""Test the function with an operator from the non-commuting list."""
res = qml.is_commuting(qml.PauliX(wires=0), qml.QFT(wires=[1, 0]))
        assert res is False
class TestCommutationDAG:
"""Commutation DAG tests."""
def test_return_dag(self):
def circuit():
qml.PauliZ(wires=0)
dag_object = qml.transforms.commutation_dag(circuit)()
dag = dag_object.graph
assert len(dag) != 0
def test_dag_invalid_argument(self):
"""Assert error raised when input is neither a tape, QNode, nor quantum function"""
with pytest.raises(ValueError, match="Input is not a tape, QNode, or quantum function"):
qml.transforms.commutation_dag(qml.PauliZ(0))()
def test_dag_wrong_function(self):
"""Assert error raised when input function is not a quantum function"""
def test_function(x):
return x
with pytest.raises(ValueError, match="Function contains no quantum operation"):
qml.transforms.commutation_dag(test_function)(1)
def test_dag_transform_simple_dag_function(self):
"""Test a simple DAG on 1 wire with a quantum function."""
def circuit():
qml.PauliZ(wires=0)
qml.PauliX(wires=0)
dag = qml.transforms.commutation_dag(circuit)()
a = qml.PauliZ(wires=0)
b = qml.PauliX(wires=0)
nodes = [a, b]
edges = [(0, 1, {"commute": False})]
assert dag.get_node(0).op.compare(a)
assert dag.get_node(1).op.compare(b)
assert dag.get_edge(0, 1) == {0: {"commute": False}}
assert dag.get_edge(0, 2) is None
assert dag.observables == []
for i, node in enumerate(dag.get_nodes()):
assert node[1].op.compare(nodes[i])
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_transform_simple_dag_tape(self):
"""Test a simple DAG on 1 wire with a quantum tape."""
with qml.tape.QuantumTape() as tape:
qml.PauliZ(wires=0)
qml.PauliX(wires=0)
dag = qml.transforms.commutation_dag(tape)()
a = qml.PauliZ(wires=0)
b = qml.PauliX(wires=0)
nodes = [a, b]
edges = [(0, 1, {"commute": False})]
assert dag.get_node(0).op.compare(a)
assert dag.get_node(1).op.compare(b)
assert dag.get_edge(0, 1) == {0: {"commute": False}}
assert dag.get_edge(0, 2) is None
assert dag.observables == []
for i, node in enumerate(dag.get_nodes()):
assert node[1].op.compare(nodes[i])
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_transform_simple_dag_function_custom_wire(self):
"""Test a simple DAG on 2 wires with a quantum function and custom wires."""
def circuit():
qml.PauliZ(wires="a")
qml.PauliX(wires="c")
dag = qml.transforms.commutation_dag(circuit)()
a = qml.PauliZ(wires=0)
b = qml.PauliX(wires=1)
nodes = [a, b]
edges = [(0, 1, {"commute": False})]
assert dag.get_node(0).op.compare(a)
assert dag.get_node(1).op.compare(b)
assert dag.get_edge(0, 1) is None
assert dag.get_edge(0, 2) is None
assert dag.observables == []
for i, node in enumerate(dag.get_nodes()):
assert node[1].op.compare(nodes[i])
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_transform_simple_dag_qnode(self):
"""Test a simple DAG on 1 wire with a qnode."""
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev)
def circuit():
qml.PauliZ(wires=0)
qml.PauliX(wires=0)
return qml.expval(qml.PauliX(wires=0))
dag = qml.transforms.commutation_dag(circuit)()
a = qml.PauliZ(wires=0)
b = qml.PauliX(wires=0)
nodes = [a, b]
edges = [(0, 1, {"commute": False})]
assert dag.get_node(0).op.compare(a)
assert dag.get_node(1).op.compare(b)
assert dag.get_edge(0, 1) == {0: {"commute": False}}
assert dag.get_edge(0, 2) is None
assert dag.observables[0].return_type.__repr__() == "expval"
assert dag.observables[0].name == "PauliX"
assert dag.observables[0].wires.tolist() == [0]
for i, node in enumerate(dag.get_nodes()):
assert node[1].op.compare(nodes[i])
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_pattern(self):
"Test a the DAG and its attributes for a more complicated circuit."
def circuit():
qml.CNOT(wires=[3, 0])
qml.PauliX(wires=4)
qml.PauliZ(wires=0)
qml.CNOT(wires=[4, 2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[3, 4])
qml.CNOT(wires=[1, 2])
qml.PauliX(wires=1)
qml.CNOT(wires=[1, 0])
qml.PauliX(wires=1)
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[0, 3])
dag = qml.transforms.commutation_dag(circuit)()
wires = [3, 0, 4, 2, 1]
consecutive_wires = Wires(range(len(wires)))
wires_map = OrderedDict(zip(wires, consecutive_wires))
nodes = [
qml.CNOT(wires=[3, 0]),
qml.PauliX(wires=4),
qml.PauliZ(wires=0),
qml.CNOT(wires=[4, 2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[3, 4]),
qml.CNOT(wires=[1, 2]),
qml.PauliX(wires=1),
qml.CNOT(wires=[1, 0]),
qml.PauliX(wires=1),
qml.CNOT(wires=[1, 2]),
qml.CNOT(wires=[0, 3]),
]
for node in nodes:
node._wires = Wires([wires_map[wire] for wire in node.wires.tolist()])
edges = [
(0, 2, {"commute": False}),
(0, 4, {"commute": False}),
(1, 3, {"commute": False}),
(2, 8, {"commute": False}),
(3, 5, {"commute": False}),
(4, 6, {"commute": False}),
(5, 11, {"commute": False}),
(6, 7, {"commute": False}),
(7, 8, {"commute": False}),
(8, 9, {"commute": False}),
(8, 11, {"commute": False}),
(9, 10, {"commute": False}),
]
direct_successors = [[2, 4], [3], [8], [5], [6], [11], [7], [8], [9, 11], [10], [], []]
successors = [
[2, 4, 6, 7, 8, 9, 10, 11],
[3, 5, 11],
[8, 9, 10, 11],
[5, 11],
[6, 7, 8, 9, 10, 11],
[11],
[7, 8, 9, 10, 11],
[8, 9, 10, 11],
[9, 10, 11],
[10],
[],
[],
]
direct_predecessors = [[], [], [0], [1], [0], [3], [4], [6], [2, 7], [8], [9], [5, 8]]
predecessors = [
[],
[],
[0],
[1],
[0],
[1, 3],
[0, 4],
[0, 4, 6],
[0, 2, 4, 6, 7],
[0, 2, 4, 6, 7, 8],
[0, 2, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
]
assert dag.observables == []
for i in range(0, 12):
assert dag.get_node(i).op.name == nodes[i].name
assert dag.get_node(i).op.wires == nodes[i].wires
assert dag.direct_successors(i) == direct_successors[i]
assert dag.get_node(i).successors == successors[i] == dag.successors(i)
assert dag.direct_predecessors(i) == direct_predecessors[i]
assert dag.get_node(i).predecessors == predecessors[i] == dag.predecessors(i)
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_parameters_autograd(self):
"Test a the DAG and its attributes for autograd parameters."
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
qml.CNOT(wires=[1, 2])
qml.RY(y, wires=1)
qml.Hadamard(wires=2)
qml.CRZ(z, wires=[2, 0])
qml.RY(-y, wires=1)
return qml.expval(qml.PauliZ(0))
x = np.array([np.pi / 4, np.pi / 3, np.pi / 2], requires_grad=False)
get_dag = qml.transforms.commutation_dag(circuit)
dag = get_dag(x[0], x[1], x[2])
nodes = [
qml.RX(x[0], wires=0),
qml.RX(x[1], wires=0),
qml.CNOT(wires=[1, 2]),
qml.RY(x[1], wires=1),
qml.Hadamard(wires=2),
qml.CRZ(x[2], wires=[2, 0]),
qml.RY(-x[1], wires=1),
]
edges = [
(0, 5, {"commute": False}),
(1, 5, {"commute": False}),
(2, 3, {"commute": False}),
(2, 4, {"commute": False}),
(2, 6, {"commute": False}),
(4, 5, {"commute": False}),
]
direct_successors = [[5], [5], [3, 4, 6], [], [5], [], []]
successors = [[5], [5], [3, 4, 5, 6], [], [5], [], []]
direct_predecessors = [[], [], [], [2], [2], [0, 1, 4], [2]]
predecessors = [[], [], [], [2], [2], [0, 1, 2, 4], [2]]
for i in range(0, 7):
assert dag.get_node(i).op.name == nodes[i].name
assert dag.get_node(i).op.data == nodes[i].data
assert dag.get_node(i).op.wires == nodes[i].wires
assert dag.direct_successors(i) == direct_successors[i]
assert dag.get_node(i).successors == successors[i] == dag.successors(i)
assert dag.direct_predecessors(i) == direct_predecessors[i]
assert dag.get_node(i).predecessors == predecessors[i] == dag.predecessors(i)
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_parameters_tf(self):
"Test a the DAG and its attributes for tensorflow parameters."
tf = pytest.importorskip("tensorflow")
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
qml.CNOT(wires=[1, 2])
qml.RY(y, wires=1)
qml.Hadamard(wires=2)
qml.CRZ(z, wires=[2, 0])
qml.RY(-y, wires=1)
return qml.expval(qml.PauliZ(0))
x = tf.Variable([np.pi / 4, np.pi / 3, np.pi / 2], dtype=tf.float64)
get_dag = qml.transforms.commutation_dag(circuit)
dag = get_dag(x[0], x[1], x[2])
nodes = [
qml.RX(x[0], wires=0),
qml.RX(x[1], wires=0),
qml.CNOT(wires=[1, 2]),
qml.RY(x[1], wires=1),
qml.Hadamard(wires=2),
qml.CRZ(x[2], wires=[2, 0]),
qml.RY(-x[1], wires=1),
]
edges = [
(0, 5, {"commute": False}),
(1, 5, {"commute": False}),
(2, 3, {"commute": False}),
(2, 4, {"commute": False}),
(2, 6, {"commute": False}),
(4, 5, {"commute": False}),
]
direct_successors = [[5], [5], [3, 4, 6], [], [5], [], []]
successors = [[5], [5], [3, 4, 5, 6], [], [5], [], []]
direct_predecessors = [[], [], [], [2], [2], [0, 1, 4], [2]]
predecessors = [[], [], [], [2], [2], [0, 1, 2, 4], [2]]
for i in range(0, 7):
assert dag.get_node(i).op.name == nodes[i].name
assert dag.get_node(i).op.data == nodes[i].data
assert dag.get_node(i).op.wires == nodes[i].wires
assert dag.direct_successors(i) == direct_successors[i]
assert dag.get_node(i).successors == successors[i] == dag.successors(i)
assert dag.direct_predecessors(i) == direct_predecessors[i]
assert dag.get_node(i).predecessors == predecessors[i] == dag.predecessors(i)
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_parameters_torch(self):
"Test a the DAG and its attributes for torch parameters."
torch = pytest.importorskip("torch", minversion="1.8")
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
qml.CNOT(wires=[1, 2])
qml.RY(y, wires=1)
qml.Hadamard(wires=2)
qml.CRZ(z, wires=[2, 0])
qml.RY(-y, wires=1)
return qml.expval(qml.PauliZ(0))
x = torch.tensor([np.pi / 4, np.pi / 3, np.pi / 2], requires_grad=False)
get_dag = qml.transforms.commutation_dag(circuit)
dag = get_dag(x[0], x[1], x[2])
nodes = [
qml.RX(x[0], wires=0),
qml.RX(x[1], wires=0),
qml.CNOT(wires=[1, 2]),
qml.RY(x[1], wires=1),
qml.Hadamard(wires=2),
qml.CRZ(x[2], wires=[2, 0]),
qml.RY(-x[1], wires=1),
]
edges = [
(0, 5, {"commute": False}),
(1, 5, {"commute": False}),
(2, 3, {"commute": False}),
(2, 4, {"commute": False}),
(2, 6, {"commute": False}),
(4, 5, {"commute": False}),
]
direct_successors = [[5], [5], [3, 4, 6], [], [5], [], []]
successors = [[5], [5], [3, 4, 5, 6], [], [5], [], []]
direct_predecessors = [[], [], [], [2], [2], [0, 1, 4], [2]]
predecessors = [[], [], [], [2], [2], [0, 1, 2, 4], [2]]
for i in range(0, 7):
assert dag.get_node(i).op.name == nodes[i].name
assert dag.get_node(i).op.data == nodes[i].data
assert dag.get_node(i).op.wires == nodes[i].wires
assert dag.direct_successors(i) == direct_successors[i]
assert dag.get_node(i).successors == successors[i] == dag.successors(i)
assert dag.direct_predecessors(i) == direct_predecessors[i]
assert dag.get_node(i).predecessors == predecessors[i] == dag.predecessors(i)
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
def test_dag_parameters_jax(self):
"Test a the DAG and its attributes for jax parameters."
jax = pytest.importorskip("jax")
from jax import numpy as jnp
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
qml.CNOT(wires=[1, 2])
qml.RY(y, wires=1)
qml.Hadamard(wires=2)
qml.CRZ(z, wires=[2, 0])
qml.RY(-y, wires=1)
return qml.expval(qml.PauliZ(0))
x = jnp.array([np.pi / 4, np.pi / 3, np.pi / 2], dtype=jnp.float64)
get_dag = qml.transforms.commutation_dag(circuit)
dag = get_dag(x[0], x[1], x[2])
nodes = [
qml.RX(x[0], wires=0),
qml.RX(x[1], wires=0),
qml.CNOT(wires=[1, 2]),
qml.RY(x[1], wires=1),
qml.Hadamard(wires=2),
qml.CRZ(x[2], wires=[2, 0]),
qml.RY(-x[1], wires=1),
]
edges = [
(0, 5, {"commute": False}),
(1, 5, {"commute": False}),
(2, 3, {"commute": False}),
(2, 4, {"commute": False}),
(2, 6, {"commute": False}),
(4, 5, {"commute": False}),
]
direct_successors = [[5], [5], [3, 4, 6], [], [5], [], []]
successors = [[5], [5], [3, 4, 5, 6], [], [5], [], []]
direct_predecessors = [[], [], [], [2], [2], [0, 1, 4], [2]]
predecessors = [[], [], [], [2], [2], [0, 1, 2, 4], [2]]
for i in range(0, 7):
assert dag.get_node(i).op.name == nodes[i].name
assert dag.get_node(i).op.data == nodes[i].data
assert dag.get_node(i).op.wires == nodes[i].wires
assert dag.direct_successors(i) == direct_successors[i]
assert dag.get_node(i).successors == successors[i] == dag.successors(i)
assert dag.direct_predecessors(i) == direct_predecessors[i]
assert dag.get_node(i).predecessors == predecessors[i] == dag.predecessors(i)
for i, edge in enumerate(dag.get_edges()):
assert edges[i] == edge
|
py | b40984a2cdbf526cefc8e6980d06193f5bfd289e | import numpy as np
import pytest
from .conftest import gdal_version
import rasterio
from rasterio import (
ubyte,
uint8,
uint16,
uint32,
uint64,
int16,
int32,
int64,
float32,
float64,
complex_,
complex_int16,
)
from rasterio.dtypes import (
_gdal_typename,
is_ndarray,
check_dtype,
get_minimum_dtype,
can_cast_dtype,
validate_dtype,
_is_complex_int,
_getnpdtype,
_get_gdal_dtype,
)
def test_is_ndarray():
assert is_ndarray(np.zeros((1,)))
assert not is_ndarray([0])
assert not is_ndarray((0,))
def test_np_dt_uint8():
assert check_dtype(np.uint8)
def test_dt_ubyte():
assert check_dtype(ubyte)
def test_check_dtype_invalid():
assert not check_dtype('foo')
@pytest.mark.parametrize(
("dtype", "name"),
[
(ubyte, "Byte"),
(np.uint8, "Byte"),
(np.uint16, "UInt16"),
("uint8", "Byte"),
("complex_int16", "CInt16"),
(complex_int16, "CInt16"),
],
)
def test_gdal_name(dtype, name):
assert _gdal_typename(dtype) == name
def test_get_minimum_dtype():
assert get_minimum_dtype([0, 1]) == uint8
assert get_minimum_dtype([0, 1000]) == uint16
assert get_minimum_dtype([0, 100000]) == uint32
assert get_minimum_dtype([-1, 0, 1]) == int16
assert get_minimum_dtype([-1, 0, 100000]) == int32
assert get_minimum_dtype([-1.5, 0, 1.5]) == float32
assert get_minimum_dtype([-1.5e+100, 0, 1.5e+100]) == float64
assert get_minimum_dtype(np.array([0, 1], dtype=np.uint)) == uint8
assert get_minimum_dtype(np.array([0, 1000], dtype=np.uint)) == uint16
assert get_minimum_dtype(np.array([0, 100000], dtype=np.uint)) == uint32
assert get_minimum_dtype(np.array([-1, 0, 1], dtype=int)) == int16
assert get_minimum_dtype(np.array([-1, 0, 100000], dtype=int)) == int32
assert get_minimum_dtype(np.array([-1.5, 0, 1.5], dtype=np.float64)) == float32
def test_get_minimum_dtype__int64():
if gdal_version.at_least("3.5"):
assert get_minimum_dtype([-1, 0, 2147483648]) == int64
else:
with pytest.raises(ValueError, match="Values out of range for supported dtypes"):
get_minimum_dtype([-1, 0, 2147483648])
def test_get_minimum_dtype__uint64():
if gdal_version.at_least("3.5"):
assert get_minimum_dtype([0, 4294967296]) == uint64
else:
with pytest.raises(ValueError, match="Values out of range for supported dtypes"):
get_minimum_dtype([0, 4294967296])
def test_can_cast_dtype():
assert can_cast_dtype((1, 2, 3), np.uint8)
assert can_cast_dtype(np.array([1, 2, 3]), np.uint8)
assert can_cast_dtype(np.array([1, 2, 3], dtype=np.uint8), np.uint8)
assert can_cast_dtype(np.array([1, 2, 3]), np.float32)
assert can_cast_dtype(np.array([1.4, 2.1, 3.65]), np.float32)
assert not can_cast_dtype(np.array([1.4, 2.1, 3.65]), np.uint8)
@pytest.mark.parametrize("dtype", ["float64", "float32"])
def test_can_cast_dtype_nan(dtype):
assert can_cast_dtype([np.nan], dtype)
@pytest.mark.parametrize("dtype", ["uint8", "uint16", "uint32", "int32"])
def test_cant_cast_dtype_nan(dtype):
assert not can_cast_dtype([np.nan], dtype)
def test_validate_dtype():
assert validate_dtype([1, 2, 3], ('uint8', 'uint16'))
assert validate_dtype(np.array([1, 2, 3]), ('uint8', 'uint16'))
assert validate_dtype(np.array([1.4, 2.1, 3.65]), ('float32',))
assert not validate_dtype(np.array([1.4, 2.1, 3.65]), ('uint8',))
def test_complex(tmpdir):
name = str(tmpdir.join("complex.tif"))
arr1 = np.ones((2, 2), dtype=complex_)
profile = dict(driver='GTiff', width=2, height=2, count=1, dtype=complex_)
with rasterio.open(name, 'w', **profile) as dst:
dst.write(arr1, 1)
with rasterio.open(name) as src:
arr2 = src.read(1)
assert np.array_equal(arr1, arr2)
def test_is_complex_int():
assert _is_complex_int("complex_int16")
def test_not_is_complex_int():
assert not _is_complex_int("complex")
def test_get_npdtype():
npdtype = _getnpdtype("complex_int16")
assert npdtype == np.complex64
assert npdtype.kind == "c"
def test__get_gdal_dtype__int64():
if gdal_version.at_least("3.5"):
assert _get_gdal_dtype("int64") == 12
else:
with pytest.raises(TypeError, match="Unsupported data type"):
_get_gdal_dtype("int64")
|
py | b40984eedbb5f302c735877d80cff22899636576 | # TestSwiftValueOfOptionals.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Check that trying to read an optional's numeric value doesn't crash LLDB
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftValueOfOptionalType(TestBase):
mydir = TestBase.compute_mydir(__file__)
@swiftTest
def test_swift_value_optional_type(self):
"""Check that trying to read an optional's numeric value doesn't crash LLDB"""
self.build()
self.do_check()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_check(self):
"""Check that trying to read an optional's numeric value doesn't crash LLDB"""
s = self.frame().FindVariable("s")
self.assertTrue(s.GetValueAsSigned(0) == 0, "reading value fails")
self.assertTrue(s.GetValueAsSigned(1) == 1, "reading value fails")
self.assertTrue(s.GetValueAsUnsigned(0) == 0, "reading value fails")
self.assertTrue(s.GetValueAsUnsigned(1) == 1, "reading value fails")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
py | b409869396c449fb39c856fa5ccf5dde4de872e8 | #!/usr/bin/env python
import os
import yaml
import shlex
import subprocess
travis = yaml.safe_load(open('.travis.yml', 'r'))
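# Illustrative sketch of the .travis.yml shape this script assumes (hypothetical
# example, not taken from the repository): each `env` entry is a space-separated
# list of KEY=VALUE pairs, e.g.
#   env:
#     - CRATE=some_crate
#     - CRATE=other_crate EXAMPLES="--examples demo" FEATURES="--features extra"
# The loop below splits each entry into the dict `d` and runs `cargo build` in
# the directory named by CRATE.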
for env in travis['env']:
words = shlex.split(env)
d = {}
for word in words:
key_value = word.split('=', 1)
d[key_value[0]] = key_value[1]
crate_dir = d["CRATE"]
cmd = [
'cargo',
'build',
]
examples = d.get("EXAMPLES", None)
if examples != None:
cmd.extend(shlex.split(examples))
features = d.get("FEATURES", None)
if features != None:
cmd.extend(shlex.split(features))
print(cmd)
subprocess.check_call(cmd, cwd=crate_dir)
|
py | b409871c2d2a99338677c3b0a507684f3e26307c | from csv import QUOTE_MINIMAL, reader, writer
def read_from_csv(last):
    contact = None
    with open('contacts.csv', newline='') as f:
        contacts = reader(f, delimiter=' ', quotechar='|')
        for row in contacts:
            if row and row[1] == last:
                contact = {"first": row[0], "last": row[1], "phone": row[2]}
    return contact
def write_to_csv(contact):
with open('contacts.csv', 'a', newline='') as f:
contacts = writer(
f, delimiter=' ', quotechar='|', quoting=QUOTE_MINIMAL)
contacts.writerow([contact["first"],
contact["last"], contact["phone"]])
def create(first, last, phone):
contact = {}
contact.update({"first": first, "last": last, "phone": phone})
write_to_csv(contact)
def read(last):
    contact = read_from_csv(last)
    if contact is None:
        print(f'No contact found with last name {last}.')
        return None
    print(f'{contact["first"]} {contact["last"]} {contact["phone"]}.')
    return contact
def rewrite_csv(rows):
    with open('contacts.csv', 'w', newline='') as f:
        contacts = writer(
            f, delimiter=' ', quotechar='|', quoting=QUOTE_MINIMAL)
        contacts.writerows(rows)
def update(last, phone):
    # Rewrite the file with the matching contact's phone number replaced.
    with open('contacts.csv', newline='') as f:
        rows = list(reader(f, delimiter=' ', quotechar='|'))
    for row in rows:
        if row and row[1] == last:
            row[2] = phone
            print(f'{row[0]} {row[1]} phone updated to {row[2]}.')
    rewrite_csv(rows)
def delete(last):
    # Rewrite the file without the matching contact.
    with open('contacts.csv', newline='') as f:
        rows = list(reader(f, delimiter=' ', quotechar='|'))
    kept = [row for row in rows if not row or row[1] != last]
    for row in rows:
        if row and row[1] == last:
            print(f'{row[0]} {row[1]} removed.')
    rewrite_csv(kept)
while True:
try:
query = int(
input('1: Create\n2. Read\n3: Update\n4: Delete\n5: Quit\n\n'))
except ValueError:
print('Please enter a value from 1 to 5.')
continue
if query == 1:
first = input('What is the first name? ')
last = input('What is the last name? ')
phone = input('What is the phone number? ')
create(first, last, phone)
elif query == 2:
key = input('Look up by last name. ')
read(key)
elif query == 3:
key = input('Which entry would you like to update? ')
phone = input('What is the phone number? ')
update(key, phone)
elif query == 4:
key = input('Enter a last name to remove: ')
delete(key)
elif query == 5:
quit()
|
py | b409880fc052f27651731dc82c378420e709c12f | """
https://leetcode.com/problems/partition-equal-subset-sum/
Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such
that the sum of elements in both subsets is equal.
Note:
Each of the array element will not exceed 100.
The array size will not exceed 200.
Example 1:
Input: [1, 5, 11, 5]
Output: true
Explanation: The array can be partitioned as [1, 5, 5] and [11].
Example 2:
Input: [1, 2, 3, 5]
Output: false
Explanation: The array cannot be partitioned into equal sum subsets.
"""
class Solution(object):
def canPartition(self, nums):
"""
        The idea: process the numbers one at a time and keep, for every value s in
        [0, target_sum], a flag saying whether some subset of the numbers seen so
        far sums to s. After processing `num`, s is reachable iff it was already
        reachable or s - num was reachable before `num`.
:type nums: List[int]
:rtype: bool
"""
if len(nums) <= 1 or sum(nums) % 2 != 0:
return False
target_sum = int(sum(nums) / 2)
dp = [True] + [False] * target_sum
for num in nums:
dp = [
dp[previous_sum]
or (previous_sum >= num and dp[previous_sum - num])
for previous_sum in range(target_sum + 1)
]
if dp[target_sum]:
return True
return False
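# Minimal usage sketch of the DP above (hypothetical driver, not part of the
# original solution): after processing every number, dp[target_sum] says whether
# some subset reaches half of the total.
if __name__ == "__main__":
    solution = Solution()
    assert solution.canPartition([1, 5, 11, 5])       # [1, 5, 5] vs [11]
    assert not solution.canPartition([1, 2, 3, 5])    # total 11 is odd
    print("canPartition sanity checks passed")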
|
py | b40989634f7119b0d0849376c9d8cddcb43ce08f | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# Futuristic imports
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
# Built-in imports
import os
import signal
import time
import unittest
import uuid
# Library imports
import six
# Finally, the package under test
import turicreate as tc
from turicreate.toolkits._internal_utils import _mac_ver
class ExploreTest(unittest.TestCase):
@unittest.skipIf(
_mac_ver() < (10, 12), "macOS-only test; UISoup doesn't work on Linux"
)
@unittest.skipIf(
_mac_ver() > (10, 13),
"macOS 10.14 appears to have broken the UX flow to prompt for accessibility access",
)
@unittest.skipIf(not (six.PY2), "Python 2.7-only test; UISoup doesn't work on 3.x")
def test_sanity_on_macOS(self):
"""
Create a simple SFrame, containing a very unique string.
Then, using uisoup, look for this string within a window
and assert that it appears.
"""
# Library imports
from uisoup import uisoup
# Generate some test data
unique_str = repr(uuid.uuid4())
sf = tc.SFrame({"a": [1, 2, 3], "b": ["hello", "world", unique_str]})
# Run the explore view and make sure we can see our unique string
sf.explore()
time.sleep(2)
window = None
try:
window = uisoup.get_window("Turi*Create*Visualization")
result = window.findall(value=unique_str)
self.assertEqual(
len(result),
1,
(
"Expected to find exactly one element containing the unique"
"string %s."
)
% unique_str,
)
first = result[0]
self.assertEqual(
first.acc_name,
unique_str,
(
"Expected to find the unique string %s as the name of the found"
"element. Instead, got %s."
)
% (unique_str, first.acc_name),
)
finally:
if window is not None:
# Kill the explore process
os.kill(window.proc_id, signal.SIGTERM)
|
py | b4098b5b1a11056248bd404a333fdd48b5ab3276 | import numpy as np
from pprint import pprint
from numba import jit
file = '2020/inputs/d11.txt'
# Read the file
with open(file) as f:
lines = [line.strip() for line in f if line.strip()]
S = np.array([[1 if x=='L' else 0 for x in line] for line in lines])
def count_adj_occupied(S):
occ = S==2
counts = np.zeros_like(S)
# Add counts to 8 adjacent squares
counts[1:,:] += occ[:-1,:]
counts[:-1,:] += occ[1:,:]
counts[:,1:] += occ[:,:-1]
counts[:,:-1] += occ[:,1:]
counts[1:,1:] += occ[:-1,:-1]
counts[1:,:-1] += occ[:-1,1:]
counts[:-1,1:] += occ[1:,:-1]
counts[:-1,:-1] += occ[1:,1:]
return counts
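# Worked example of the shifted-slice trick above (added for illustration):
# for S = [[2, 1], [1, 2]] the occupied mask is [[True, False], [False, True]],
# and accumulating the eight shifted copies gives counts = [[1, 2], [2, 1]],
# i.e. each cell's number of occupied neighbours.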
def move(S):
# print(S)
counts = count_adj_occupied(S)
# print(counts)
occ1 = (S==1) & (counts==0)
occ0 = (S==2) & (counts>=4)
S[occ1] = 2
S[occ0] = 1
return occ1.sum() + occ0.sum()
S_og = S.copy()
while True:
changed = move(S)
# print(changed, (S==2).sum())
if changed == 0:
break
print('P1', (S==2).sum())
# @jit(nopython=True, parallel=True)
def count_adj_occupied_part2(S):
occ = S==2
counts = np.zeros_like(S)
# Add counts to 8 adjacent squares
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if S[i,j] >= 1:
for a in range(i+1, S.shape[0]):
if (x := S[a,j]) >= 1:
if x == 2: counts[i,j] += 1
break
for a in reversed(range(0, i)):
if (x := S[a,j]) >= 1:
if x == 2: counts[i,j] += 1
break
for b in range(j+1, S.shape[1]):
if (x := S[i,b]) >= 1:
if x == 2: counts[i,j] += 1
break
for b in reversed(range(0, j)):
if (x := S[i,b]) >= 1:
if x == 2: counts[i,j] += 1
break
for a,b in zip(range(i+1, S.shape[0]), range(j+1, S.shape[1])):
if (x := S[a,b]) >= 1:
if x == 2: counts[i,j] += 1
break
for a,b in zip(reversed(range(0,i)), range(j+1, S.shape[1])):
if (x := S[a,b]) >= 1:
if x == 2: counts[i,j] += 1
break
for a,b in zip(range(i+1, S.shape[0]), reversed(range(0, j))):
if (x := S[a,b]) >= 1:
if x == 2: counts[i,j] += 1
break
for a,b in zip(reversed(range(0,i)), reversed(range(0, j))):
if (x := S[a,b]) >= 1:
if x == 2: counts[i,j] += 1
break
return counts
def move2(S):
# print(S)
counts = count_adj_occupied_part2(S)
# print(counts)
occ1 = (S==1) & (counts==0)
occ0 = (S==2) & (counts>=5)
S[occ1] = 2
S[occ0] = 1
return occ1.sum() + occ0.sum()
S = S_og.copy()
while True:
changed = move2(S)
# print(changed, (S==2).sum())
if changed == 0:
break
print('P2', (S==2).sum())
|
py | b4098b5ba825890cec6215557103543b37dc05a4 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla Sheriff Duty.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
from django.contrib.auth.models import User
from django.conf import settings
from django import forms
from utils import get_user_name
from models import Slot
class BaseForm(forms.Form):
def __init__(self, *args, **kwargs):
super(BaseForm, self).__init__(*args, **kwargs)
for field in self.fields:
if isinstance(self.fields[field], forms.fields.DateField):
klass = self.fields[field].widget.attrs.get('class')
if klass:
klass += ' date'
else:
klass = 'date'
self.fields[field].widget.attrs['class'] = klass
self.fields[field].input_formats = \
[settings.DEFAULT_DATE_FORMAT]
self.fields[field].widget.format = settings.DEFAULT_DATE_FORMAT
class InitializeRosterForm(BaseForm):
starting = forms.fields.DateField()
until = forms.fields.DateField()
usernames = forms.fields.CharField(widget=forms.widgets.Textarea())
class ReplaceRosterForm(BaseForm):
from_username = forms.fields.ChoiceField()
to_username = forms.fields.ChoiceField()
starting = forms.fields.DateField(required=False)
until = forms.fields.DateField(required=False)
def __init__(self, *args, **kwargs):
super(ReplaceRosterForm, self).__init__(*args, **kwargs)
choices = []
for slot in (Slot.objects
.filter(date__gte=datetime.date.today())
.select_related('user')
.order_by('user__first_name', 'user__username')):
if slot.user.username not in [x[0] for x in choices]:
choices.append((slot.user.username, get_user_name(slot.user)))
self.fields['from_username'].choices = choices
choices = []
for user in (User.objects.filter(is_active=True)
.order_by('first_name', 'username')):
choices.append((user.username, get_user_name(user)))
self.fields['to_username'].choices = choices
class ReplaceSlotRosterForm(BaseForm):
to_username = forms.fields.ChoiceField()
def __init__(self, slot, *args, **kwargs):
super(ReplaceSlotRosterForm, self).__init__(*args, **kwargs)
choices = []
for user in (User.objects.filter(is_active=True)
.exclude(pk=slot.user_id)
.order_by('first_name', 'username')):
choices.append((user.username, get_user_name(user)))
self.fields['to_username'].choices = choices
|
py | b4098c3f7c0377ea44202074848d31e60f4a283b | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import select_class_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
webserver = SimpleWebServer()
webserver.start()
FirefoxSelectElementHandlingTests.webserver = webserver
capabilities = {'marionette': False}
FirefoxSelectElementHandlingTests.driver = webdriver.Firefox(
capabilities=capabilities)
class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests):
pass
def teardown_module(module):
FirefoxSelectElementHandlingTests.driver.quit()
FirefoxSelectElementHandlingTests.webserver.stop()
|
py | b4098c705433da350d8b975f24936bd27a16024b | import copy
import logging
import torch
from torch import nn
from inclearn.lib import factory
from .classifiers import (Classifier, CosineClassifier, DomainClassifier, MCCosineClassifier)
from .postprocessors import FactorScalar, HeatedUpScalar, InvertedFactorScalar
from inclearn.utils import LOGGER as logger
class BasicNet(nn.Module):
def __init__(
self,
convnet_type,
convnet_kwargs={},
classifier_kwargs={},
postprocessor_kwargs={},
wordembeddings_kwargs={},
init="kaiming",
device=None,
return_features=False,
extract_no_act=False,
classifier_no_act=False,
attention_hook=False,
rotations_predictor=False,
gradcam_hook=False
):
super(BasicNet, self).__init__()
if postprocessor_kwargs.get("type") == "learned_scaling":
self.post_processor = FactorScalar(**postprocessor_kwargs)
elif postprocessor_kwargs.get("type") == "inverted_learned_scaling":
self.post_processor = InvertedFactorScalar(**postprocessor_kwargs)
elif postprocessor_kwargs.get("type") == "heatedup":
self.post_processor = HeatedUpScalar(**postprocessor_kwargs)
elif postprocessor_kwargs.get("type") is None:
self.post_processor = None
else:
raise NotImplementedError(
"Unknown postprocessor {}.".format(postprocessor_kwargs["type"])
)
logger.LOGGER.info("Post processor is: {}".format(self.post_processor))
self.convnet = factory.get_convnet(convnet_type, **convnet_kwargs)
if "type" not in classifier_kwargs:
raise ValueError("Specify a classifier!", classifier_kwargs)
if classifier_kwargs["type"] == "fc":
self.classifier = Classifier(self.convnet.out_dim, device=device, **classifier_kwargs)
elif classifier_kwargs["type"] == "cosine":
self.classifier = CosineClassifier(
self.convnet.out_dim, device=device, **classifier_kwargs
)
elif classifier_kwargs["type"] == "mcdropout_cosine":
self.classifier = MCCosineClassifier(
self.convnet.out_dim, device=device, **classifier_kwargs
)
else:
raise ValueError("Unknown classifier type {}.".format(classifier_kwargs["type"]))
if rotations_predictor:
print("Using a rotations predictor.")
self.rotations_predictor = nn.Linear(self.convnet.out_dim, 4)
else:
self.rotations_predictor = None
self.word_embeddings = None
self.return_features = return_features
self.extract_no_act = extract_no_act
self.classifier_no_act = classifier_no_act
self.attention_hook = attention_hook
self.gradcam_hook = gradcam_hook
self.device = device
self.use_frozen_net = True
self.domain_classifier = None
if self.gradcam_hook:
self._hooks = [None, None]
logger.LOGGER.info("Setting gradcam hook for gradients + activations of last conv.")
self.set_gradcam_hook()
if self.extract_no_act:
logger.LOGGER.info("Features will be extracted without the last ReLU.")
if self.classifier_no_act:
logger.LOGGER.info("No ReLU will be applied on features before feeding the classifier.")
self.to(self.device)
def on_task_end(self):
if isinstance(self.classifier, nn.Module):
self.classifier.on_task_end()
if isinstance(self.post_processor, nn.Module):
self.post_processor.on_task_end()
def on_epoch_end(self):
if isinstance(self.classifier, nn.Module):
self.classifier.on_epoch_end()
if isinstance(self.post_processor, nn.Module):
self.post_processor.on_epoch_end()
def forward(
self, x, rotation=False, index=None, features_processing=None, additional_features=None, *args, **kwargs
):
if hasattr(self,
"word_embeddings") and self.word_embeddings is not None and isinstance(x, list):
words = x[1]
x = x[0]
else:
words = None
outputs = self.convnet(x, *args, **kwargs)
if words is not None: # ugly to change
outputs["word_embeddings"] = self.word_embeddings(words)
if hasattr(self, "classifier_no_act") and self.classifier_no_act:
selected_features = outputs["raw_features"]
else:
selected_features = outputs["features"]
if features_processing is not None:
selected_features = features_processing.fit_transform(selected_features)
if rotation:
outputs["rotations"] = self.rotations_predictor(outputs["features"])
nb_inputs = len(x) // 4
# for k in outputs.keys():
# if k != "rotations":
# if isinstance(outputs[k], list):
# outputs[k] = [elt[:32] for elt in outputs[k]]
# else:
# outputs[k] = outputs[k][:32]
else:
if additional_features is not None:
clf_outputs = self.classifier(
torch.cat((selected_features, additional_features), 0)
)
else:
clf_outputs = self.classifier(selected_features)
outputs.update(clf_outputs)
if hasattr(self, "gradcam_hook") and self.gradcam_hook:
outputs["gradcam_gradients"] = self._gradcam_gradients
outputs["gradcam_activations"] = self._gradcam_activations
return outputs
def post_process(self, x):
if self.post_processor is None:
return x
return self.post_processor(x)
@property
def features_dim(self):
return self.convnet.out_dim
def add_classes(self, n_classes):
self.classifier.add_classes(n_classes)
def add_imprinted_classes(self, class_indexes, inc_dataset, **kwargs):
if hasattr(self.classifier, "add_imprinted_classes"):
self.classifier.add_imprinted_classes(class_indexes, inc_dataset, self, **kwargs)
def add_custom_weights(self, weights, **kwargs):
self.classifier.add_custom_weights(weights, **kwargs)
def extract(self, x):
outputs = self.convnet(x)
if self.extract_no_act:
return outputs["raw_features"]
return outputs["features"]
def predict_rotations(self, inputs):
if self.rotations_predictor is None:
raise ValueError("Enable the rotations predictor.")
return self.rotations_predictor(self.convnet(inputs)["features"])
def freeze(self, trainable=False, model="all"):
if model == "all":
model = self
elif model == "convnet":
model = self.convnet
elif model == "classifier":
model = self.classifier
elif 'convnet.' in model:
self.convnet.freeze(trainable=trainable, model=model.split('.')[1])
else:
assert False, model
if not isinstance(model, nn.Module):
return self
for param in model.parameters():
param.requires_grad = trainable
if hasattr(self, "gradcam_hook") and self.gradcam_hook and model == "convnet":
for param in self.convnet.last_conv.parameters():
param.requires_grad = True
if not trainable:
model.eval()
else:
model.train()
return self
def get_group_parameters(self):
groups = {"convnet": self.convnet.parameters()}
if isinstance(self.post_processor, FactorScalar):
groups["postprocessing"] = self.post_processor.parameters()
if hasattr(self.classifier, "new_weights"):
groups["new_weights"] = self.classifier.new_weights
if hasattr(self.classifier, "old_weights"):
groups["old_weights"] = self.classifier.old_weights
if self.rotations_predictor:
groups["rotnet"] = self.rotations_predictor.parameters()
if hasattr(self.convnet, "last_block"):
groups["last_block"] = self.convnet.last_block.parameters()
if hasattr(self.classifier, "_negative_weights"
) and isinstance(self.classifier._negative_weights, nn.Parameter):
groups["neg_weights"] = self.classifier._negative_weights
if self.domain_classifier is not None:
groups["domain_clf"] = self.domain_classifier.parameters()
return groups
def copy(self):
return copy.deepcopy(self)
@property
def n_classes(self):
return self.classifier.n_classes
def unset_gradcam_hook(self):
self._hooks[0].remove()
self._hooks[1].remove()
self._hooks[0] = None
self._hooks[1] = None
self._gradcam_gradients, self._gradcam_activations = [None], [None]
def set_gradcam_hook(self):
self._gradcam_gradients, self._gradcam_activations = [None], [None]
def backward_hook(module, grad_input, grad_output):
self._gradcam_gradients[0] = grad_output[0]
return None
def forward_hook(module, input, output):
self._gradcam_activations[0] = output
return None
self._hooks[0] = self.convnet.last_conv.register_backward_hook(backward_hook)
self._hooks[1] = self.convnet.last_conv.register_forward_hook(forward_hook)
def create_domain_classifier(self):
self.domain_classifier = DomainClassifier(self.convnet.out_dim, device=self.device)
return self.domain_classifier
def del_domain_classifier(self):
self.domain_classifier = None
|