text (string, lengths 4 to 1.02M) | meta (dict) |
---|---|
from stomp_message_controller import StompMessageController
from stomp_message import StompMessage

class TomcatVirtualHostServiceController(StompMessageController):
    stomp_tasks = ["CreateTomcatVirtualHost", "RemoveTomcatVirtualHost", "EnableTomcatVirtualHost", "DisableTomcatVirtualHost"]

    def run(self):
        print("TomcatVirtualHostServiceController")

    def create_tomcat_virtual_host(self, stomp_message):
        print("CreateTomcatVirtualHost - tomcat_virtual_host_service_controller.create_tomcat_virtual_host()")
        stomp_message.print_message()
        successfulProvisioning = True
        acknowledgementMessageDict = stomp_message.parsed_body
        if successfulProvisioning:
            self.acknowledge_success(acknowledgementMessageDict)
        else:
            self.acknowledge_failure(acknowledgementMessageDict)

    def remove_tomcat_virtual_host(self, stomp_message):
        print("RemoveTomcatVirtualHost - tomcat_virtual_host_service_controller.remove_tomcat_virtual_host()")
        stomp_message.print_message()
        successfulProvisioning = True
        acknowledgementMessageDict = stomp_message.parsed_body
        if successfulProvisioning:
            self.acknowledge_success(acknowledgementMessageDict)
        else:
            self.acknowledge_failure(acknowledgementMessageDict)

    def enable_tomcat_virtual_host(self, stomp_message):
        print("EnableTomcatVirtualHost - tomcat_virtual_host_service_controller.enable_tomcat_virtual_host()")
        stomp_message.print_message()
        successfulProvisioning = True
        acknowledgementMessageDict = stomp_message.parsed_body
        if successfulProvisioning:
            self.acknowledge_success(acknowledgementMessageDict)
        else:
            self.acknowledge_failure(acknowledgementMessageDict)

    def disable_tomcat_virtual_host(self, stomp_message):
        print("DisableTomcatVirtualHost - tomcat_virtual_host_service_controller.disable_tomcat_virtual_host()")
        stomp_message.print_message()
        successfulProvisioning = True
        acknowledgementMessageDict = stomp_message.parsed_body
        if successfulProvisioning:
            self.acknowledge_success(acknowledgementMessageDict)
        else:
            self.acknowledge_failure(acknowledgementMessageDict)

    def acknowledge_success(self, acknowledgementMessageDict):
        print("== Acknowledge Message Success ==")
        acknowledgementQueue = acknowledgementMessageDict["service"]
        acknowledgementMessageDict["taskResult"] = "Success"
        self.send_message(acknowledgementQueue, acknowledgementMessageDict)

    def acknowledge_failure(self, acknowledgementMessageDict):
        print("== Acknowledge Message Failure ==")
        acknowledgementQueue = acknowledgementMessageDict["service"]
        acknowledgementMessageDict["taskResult"] = "Failure"
        self.send_message(acknowledgementQueue, acknowledgementMessageDict)
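# Hedged illustration (comments only, not part of the original file): both
# acknowledgement helpers above send the parsed message body back to the queue
# named in its "service" field, with a "taskResult" key added. For example, a
# payload such as {"service": "SomeQueue", "task": "CreateTomcatVirtualHost"}
# would be answered on "SomeQueue" with the same dict plus "taskResult":
# "Success" (or "Failure"). The "task" key shown here is only an assumed
# example field; only "service" and "taskResult" are used by this class.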
| {
"content_hash": "a58f823c9f3f11e2486b872d6cee0492",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 124,
"avg_line_length": 36.945205479452056,
"alnum_prop": 0.8068223952539859,
"repo_name": "MarkAufdencamp/stomp-client-daemon",
"id": "77ce87a22d5befc2caeff1d0db4a7d429b422cdb",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tomcat_management_processor/tomcat_virtual_host_service_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8872"
},
{
"name": "Python",
"bytes": "67691"
},
{
"name": "Shell",
"bytes": "10485"
}
],
"symlink_target": ""
} |
"""aide associée à ce module, exemple d'utiliation de pydoc"""
import os.path
import os
def pydoc_present () :
"""teste la présence du fichier pydoc.py"""
p = "c:\\python26\\lib\\pydoc.py"""
return os.path.exists (p)
def pydoc_generation (file) :
"""génère la documentation associée au fichier file"""
if not pydoc_present () :
raise Exception ("pydoc n'est pas installé")
os.system ("c:\\python26\\python c:\\python26\\lib\\pydoc.py -w " + file)
class ExempleClass (object) :
"""exemple de classe avec de la documentation
la classe contient comme attribut :
- li : liste quelconque
"""
def __init__ (self) :
object.__init__ (self)
self.li = ["un", "deux"]
def __str__ (self) :
"""permet d'afficher la classe sous forme de chaînes de caractères"""
return "li = " + str (self.li)
if __name__ == "__main__" :
e = ExempleClass ()
print e # affiche li = ['un', 'deux']
pydoc_generation ("exemple_pydoc") | {
"content_hash": "1334c1c4a043db1d741c6da8de6b61ab",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 32.87096774193548,
"alnum_prop": 0.5947006869479883,
"repo_name": "sdpython/teachpyx",
"id": "dafbbf235c30894739847ca5e0504c244dabf228",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_todo/programme/exemple_pydoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C++",
"bytes": "1348"
},
{
"name": "CSS",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "225"
},
{
"name": "Inno Setup",
"bytes": "7826"
},
{
"name": "Jupyter Notebook",
"bytes": "329171"
},
{
"name": "Python",
"bytes": "162686"
},
{
"name": "TeX",
"bytes": "280776"
}
],
"symlink_target": ""
} |
import json
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
def jsonify(obj, indent=0):
return mark_safe(json.dumps(obj, indent=indent))
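# Hedged usage sketch (comments only, not part of the original module): once an
# app containing this templatetags module is installed, the filter can be
# applied in a template, for example
#
#   {% load jsonify %}
#   <script>var settings = {{ settings_dict|jsonify:2 }};</script>
#
# The load name "jsonify" is assumed from this file's name (jsonify.py), and
# "settings_dict" is a hypothetical context variable.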
| {
"content_hash": "c1fe2687e06b2cc1dbe609cf99b20f8f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 21.09090909090909,
"alnum_prop": 0.7672413793103449,
"repo_name": "bulv1ne/django-utils",
"id": "57a798db617631067d2ae447d71f07fff528086c",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/templatetags/jsonify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "94"
},
{
"name": "Python",
"bytes": "35302"
},
{
"name": "Shell",
"bytes": "237"
}
],
"symlink_target": ""
} |
import logging
from neutronclient.neutron import v2_0 as neutronV20
def _format_provider(pool):
return pool.get('provider') or 'N/A'
class ListPool(neutronV20.ListCommand):
"""List pools that belong to a given tenant."""
resource = 'pool'
log = logging.getLogger(__name__ + '.ListPool')
list_columns = ['id', 'name', 'provider', 'lb_method', 'protocol',
'admin_state_up', 'status']
_formatters = {'provider': _format_provider}
pagination_support = True
sorting_support = True
class ShowPool(neutronV20.ShowCommand):
"""Show information of a given pool."""
resource = 'pool'
log = logging.getLogger(__name__ + '.ShowPool')
class CreatePool(neutronV20.CreateCommand):
"""Create a pool."""
resource = 'pool'
log = logging.getLogger(__name__ + '.CreatePool')
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help='set admin state up to false')
parser.add_argument(
'--description',
help='description of the pool')
parser.add_argument(
'--lb-method',
required=True,
choices=['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP'],
help='the algorithm used to distribute load between the members '
'of the pool')
parser.add_argument(
'--name',
required=True,
help='the name of the pool')
parser.add_argument(
'--protocol',
required=True,
choices=['HTTP', 'HTTPS', 'TCP'],
help='protocol for balancing')
parser.add_argument(
'--subnet-id', metavar='SUBNET',
required=True,
help='the subnet on which the members of the pool will be located')
parser.add_argument(
'--provider',
help='provider name of loadbalancer service')
def args2body(self, parsed_args):
_subnet_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'subnet', parsed_args.subnet_id)
body = {
self.resource: {
'admin_state_up': parsed_args.admin_state,
'subnet_id': _subnet_id,
},
}
neutronV20.update_dict(parsed_args, body[self.resource],
['description', 'lb_method', 'name',
'protocol', 'tenant_id', 'provider'])
return body
class UpdatePool(neutronV20.UpdateCommand):
"""Update a given pool."""
resource = 'pool'
log = logging.getLogger(__name__ + '.UpdatePool')
class DeletePool(neutronV20.DeleteCommand):
"""Delete a given pool."""
resource = 'pool'
log = logging.getLogger(__name__ + '.DeletePool')
class RetrievePoolStats(neutronV20.ShowCommand):
"""Retrieve stats for a given pool."""
resource = 'pool'
log = logging.getLogger(__name__ + '.RetrievePoolStats')
def get_data(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
pool_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'pool', parsed_args.id)
params = {}
if parsed_args.fields:
params = {'fields': parsed_args.fields}
data = neutron_client.retrieve_pool_stats(pool_id, **params)
self.format_output_data(data)
if 'stats' in data:
    stats = data['stats']
    return zip(*sorted(stats.iteritems()))
else:
    return None
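# Hedged usage sketch (comments only, not part of the original module): based on
# the required arguments declared in CreatePool.add_known_arguments(), a pool
# creation invocation would look roughly like
#
#   neutron lb-pool-create --name web-pool --lb-method ROUND_ROBIN \
#       --protocol HTTP --subnet-id SUBNET_ID
#
# The CLI command name under which CreatePool is registered is not shown in
# this file; "lb-pool-create" and the argument values are assumptions.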
| {
"content_hash": "8c544c5bd63a7dd9ccd8fc3fdc4182f2",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 31.700854700854702,
"alnum_prop": 0.5710434079266649,
"repo_name": "ntt-sic/python-neutronclient",
"id": "1c62dcebf85a43f89974e666cf5ebbb819209053",
"size": "4424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutronclient/neutron/v2_0/lb/pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "619450"
},
{
"name": "Shell",
"bytes": "5278"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals, print_function
from django.core.management import BaseCommand
from bs4 import BeautifulSoup
from applications.dianping.models import Shop
import requests
import json
base_url = "http://www.dianping.com/ajax/json/shop/wizard/BasicHideInfoAjaxFP?"
headers = {
"referer": "http://www.dianping.com/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
}
class Command(BaseCommand):
def handle(self, *args, **options):
shops = Shop.objects.all()
for shop in shops:
try:
self.parse_shop(shop)
except Exception as err:
print(err)
continue
def parse_shop(self, shop):
if shop.info:
return
print("parse shop %s" % shop.name)
url = "%sshopId=%s" % (base_url, shop.shop_id)
content = requests.get(url).content
json_data = json.loads(content)
shop.phone = json_data['msg']['shopInfo']['phoneNo']
shop.phone2 = json_data['msg']['shopInfo']['phoneNo2']
shop.address = json_data['msg']['shopInfo']['address']
shop.info = content
shop.save() | {
"content_hash": "16010634c5ca66891a240f72bf04bb0b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 141,
"avg_line_length": 32.41025641025641,
"alnum_prop": 0.617879746835443,
"repo_name": "chenchiyuan/gym",
"id": "e98df3e25551c995e25781115feeb22508d28f37",
"size": "1316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/dianping/management/commands/parse_dianping_detail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "71247"
},
{
"name": "HTML",
"bytes": "113377"
},
{
"name": "JavaScript",
"bytes": "1358687"
},
{
"name": "Python",
"bytes": "87609"
},
{
"name": "Shell",
"bytes": "844"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class APIError(Model):
"""Error information returned by the API.
:param code: The error code.
:type code: object
:param message: A message explaining the error reported by the service.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'object'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, code=None, message: str=None, **kwargs) -> None:
super(APIError, self).__init__(**kwargs)
self.code = code
self.message = message
class APIErrorException(HttpOperationError):
"""Server responded with exception of type: 'APIError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args)
class ChangePointDetectRequest(Model):
"""ChangePointDetectRequest.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted
by timestamp in ascending order to match the change point detection
result.
:type series: list[~azure.cognitiveservices.anomalydetector.models.Point]
:param granularity: Required. Can only be one of yearly, monthly, weekly,
daily, hourly, minutely or secondly. Granularity is used for verify
whether input series is valid. Possible values include: 'yearly',
'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly'
:type granularity: str or
~azure.cognitiveservices.anomalydetector.models.Granularity
:param custom_interval: Custom Interval is used to set non-standard time
interval, for example, if the series is 5 minutes, request can be set as
{"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
:param period: Optional argument, periodic value of a time series. If the
value is null or not present, the API will determine the period
automatically.
:type period: int
:param stable_trend_window: Optional argument, advanced model parameter, a
default stableTrendWindow will be used in detection.
:type stable_trend_window: int
:param threshold: Optional argument, advanced model parameter, between
0.0-1.0, the lower the value is, the larger the trend error will be which
means less change point will be accepted.
:type threshold: float
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[Point]'},
'granularity': {'key': 'granularity', 'type': 'Granularity'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'stable_trend_window': {'key': 'stableTrendWindow', 'type': 'int'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(self, *, series, granularity, custom_interval: int=None, period: int=None, stable_trend_window: int=None, threshold: float=None, **kwargs) -> None:
super(ChangePointDetectRequest, self).__init__(**kwargs)
self.series = series
self.granularity = granularity
self.custom_interval = custom_interval
self.period = period
self.stable_trend_window = stable_trend_window
self.threshold = threshold
class ChangePointDetectResponse(Model):
"""ChangePointDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param is_change_point: Required. isChangePoint contains change point
properties for each input point. True means an anomaly either negative or
positive has been detected. The index of the array is consistent with the
input series.
:type is_change_point: list[bool]
:param confidence_scores: Required. the change point confidence of each
point
:type confidence_scores: list[float]
"""
_validation = {
'period': {'required': True},
'is_change_point': {'required': True},
'confidence_scores': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'is_change_point': {'key': 'isChangePoint', 'type': '[bool]'},
'confidence_scores': {'key': 'confidenceScores', 'type': '[float]'},
}
def __init__(self, *, period: int, is_change_point, confidence_scores, **kwargs) -> None:
super(ChangePointDetectResponse, self).__init__(**kwargs)
self.period = period
self.is_change_point = is_change_point
self.confidence_scores = confidence_scores
class EntireDetectResponse(Model):
"""EntireDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param expected_values: Required. ExpectedValues contain expected value
for each input point. The index of the array is consistent with the input
series.
:type expected_values: list[float]
:param upper_margins: Required. UpperMargins contain upper margin of each
input point. UpperMargin is used to calculate upperBoundary, which equals
to expectedValue + (100 - marginScale)*upperMargin. Anomalies in response
can be filtered by upperBoundary and lowerBoundary. By adjusting
marginScale value, less significant anomalies can be filtered in client
side. The index of the array is consistent with the input series.
:type upper_margins: list[float]
:param lower_margins: Required. LowerMargins contain lower margin of each
input point. LowerMargin is used to calculate lowerBoundary, which equals
to expectedValue - (100 - marginScale)*lowerMargin. Points between the
boundary can be marked as normal ones in client side. The index of the
array is consistent with the input series.
:type lower_margins: list[float]
:param is_anomaly: Required. IsAnomaly contains anomaly properties for
each input point. True means an anomaly either negative or positive has
been detected. The index of the array is consistent with the input series.
:type is_anomaly: list[bool]
:param is_negative_anomaly: Required. IsNegativeAnomaly contains anomaly
status in negative direction for each input point. True means a negative
anomaly has been detected. A negative anomaly means the point is detected
as an anomaly and its real value is smaller than the expected one. The
index of the array is consistent with the input series.
:type is_negative_anomaly: list[bool]
:param is_positive_anomaly: Required. IsPositiveAnomaly contain anomaly
status in positive direction for each input point. True means a positive
anomaly has been detected. A positive anomaly means the point is detected
as an anomaly and its real value is larger than the expected one. The
index of the array is consistent with the input series.
:type is_positive_anomaly: list[bool]
"""
_validation = {
'period': {'required': True},
'expected_values': {'required': True},
'upper_margins': {'required': True},
'lower_margins': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'expected_values': {'key': 'expectedValues', 'type': '[float]'},
'upper_margins': {'key': 'upperMargins', 'type': '[float]'},
'lower_margins': {'key': 'lowerMargins', 'type': '[float]'},
'is_anomaly': {'key': 'isAnomaly', 'type': '[bool]'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': '[bool]'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': '[bool]'},
}
def __init__(self, *, period: int, expected_values, upper_margins, lower_margins, is_anomaly, is_negative_anomaly, is_positive_anomaly, **kwargs) -> None:
super(EntireDetectResponse, self).__init__(**kwargs)
self.period = period
self.expected_values = expected_values
self.upper_margins = upper_margins
self.lower_margins = lower_margins
self.is_anomaly = is_anomaly
self.is_negative_anomaly = is_negative_anomaly
self.is_positive_anomaly = is_positive_anomaly
class LastDetectResponse(Model):
"""LastDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means
no recurrent pattern has been found.
:type period: int
:param suggested_window: Required. Suggested input series points needed
for detecting the latest point.
:type suggested_window: int
:param expected_value: Required. Expected value of the latest point.
:type expected_value: float
:param upper_margin: Required. Upper margin of the latest point.
UpperMargin is used to calculate upperBoundary, which equals to
expectedValue + (100 - marginScale)*upperMargin. If the value of latest
point is between upperBoundary and lowerBoundary, it should be treated as
normal value. By adjusting marginScale value, anomaly status of latest
point can be changed.
:type upper_margin: float
:param lower_margin: Required. Lower margin of the latest point.
LowerMargin is used to calculate lowerBoundary, which equals to
expectedValue - (100 - marginScale)*lowerMargin.
:type lower_margin: float
:param is_anomaly: Required. Anomaly status of the latest point, true
means the latest point is an anomaly either in negative direction or
positive direction.
:type is_anomaly: bool
:param is_negative_anomaly: Required. Anomaly status in negative direction
of the latest point. True means the latest point is an anomaly and its
real value is smaller than the expected one.
:type is_negative_anomaly: bool
:param is_positive_anomaly: Required. Anomaly status in positive direction
of the latest point. True means the latest point is an anomaly and its
real value is larger than the expected one.
:type is_positive_anomaly: bool
"""
_validation = {
'period': {'required': True},
'suggested_window': {'required': True},
'expected_value': {'required': True},
'upper_margin': {'required': True},
'lower_margin': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'suggested_window': {'key': 'suggestedWindow', 'type': 'int'},
'expected_value': {'key': 'expectedValue', 'type': 'float'},
'upper_margin': {'key': 'upperMargin', 'type': 'float'},
'lower_margin': {'key': 'lowerMargin', 'type': 'float'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': 'bool'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': 'bool'},
}
def __init__(self, *, period: int, suggested_window: int, expected_value: float, upper_margin: float, lower_margin: float, is_anomaly: bool, is_negative_anomaly: bool, is_positive_anomaly: bool, **kwargs) -> None:
super(LastDetectResponse, self).__init__(**kwargs)
self.period = period
self.suggested_window = suggested_window
self.expected_value = expected_value
self.upper_margin = upper_margin
self.lower_margin = lower_margin
self.is_anomaly = is_anomaly
self.is_negative_anomaly = is_negative_anomaly
self.is_positive_anomaly = is_positive_anomaly
class Point(Model):
"""Point.
All required parameters must be populated in order to send to Azure.
:param timestamp: Required. Timestamp of a data point (ISO8601 format).
:type timestamp: datetime
:param value: Required. The measurement of that point, should be float.
:type value: float
"""
_validation = {
'timestamp': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(self, *, timestamp, value: float, **kwargs) -> None:
super(Point, self).__init__(**kwargs)
self.timestamp = timestamp
self.value = value
class Request(Model):
"""Request.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted
by timestamp in ascending order to match the anomaly detection result. If
the data is not sorted correctly or there is duplicated timestamp, the API
will not work. In such case, an error message will be returned.
:type series: list[~azure.cognitiveservices.anomalydetector.models.Point]
:param granularity: Required. Possible values include: 'yearly',
'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly'
:type granularity: str or
~azure.cognitiveservices.anomalydetector.models.Granularity
:param custom_interval: Custom Interval is used to set non-standard time
interval, for example, if the series is 5 minutes, request can be set as
{"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
:param period: Optional argument, periodic value of a time series. If the
value is null or not present, the API will determine the period
automatically.
:type period: int
:param max_anomaly_ratio: Optional argument, advanced model parameter, max
anomaly ratio in a time series.
:type max_anomaly_ratio: float
:param sensitivity: Optional argument, advanced model parameter, between
0-99, the lower the value is, the larger the margin value will be which
means less anomalies will be accepted.
:type sensitivity: int
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[Point]'},
'granularity': {'key': 'granularity', 'type': 'Granularity'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'max_anomaly_ratio': {'key': 'maxAnomalyRatio', 'type': 'float'},
'sensitivity': {'key': 'sensitivity', 'type': 'int'},
}
def __init__(self, *, series, granularity, custom_interval: int=None, period: int=None, max_anomaly_ratio: float=None, sensitivity: int=None, **kwargs) -> None:
super(Request, self).__init__(**kwargs)
self.series = series
self.granularity = granularity
self.custom_interval = custom_interval
self.period = period
self.max_anomaly_ratio = max_anomaly_ratio
self.sensitivity = sensitivity
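# Hedged usage sketch (comments only, not part of the generated SDK source):
# constructing a minimal detection request from the models defined above.
#
#   from datetime import datetime, timedelta
#   start = datetime(2019, 1, 1)
#   series = [Point(timestamp=start + timedelta(hours=i), value=float(i % 24))
#             for i in range(168)]
#   request = Request(series=series, granularity='hourly', sensitivity=95)
#
# The timestamps, values, and sensitivity used here are illustrative only.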
| {
"content_hash": "52072aa7c0d13d9ddb055a15282af90b",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 217,
"avg_line_length": 43.884507042253524,
"alnum_prop": 0.6648051864689646,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4d86c5b14eee26f41dedff5e9af29459cd02a50f",
"size": "16053",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cognitiveservices/azure-cognitiveservices-anomalydetector/azure/cognitiveservices/anomalydetector/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Tests of grpc_status with gRPC AsyncIO stack."""
import logging
import traceback
import unittest
import grpc
from google.protobuf import any_pb2
from google.rpc import code_pb2, error_details_pb2, status_pb2
from grpc.experimental import aio
from grpc_status import rpc_status
from tests_aio.unit._test_base import AioTestBase
_STATUS_OK = '/test/StatusOK'
_STATUS_NOT_OK = '/test/StatusNotOk'
_ERROR_DETAILS = '/test/ErrorDetails'
_INCONSISTENT = '/test/Inconsistent'
_INVALID_CODE = '/test/InvalidCode'
_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x01\x01\x01'
_GRPC_DETAILS_METADATA_KEY = 'grpc-status-details-bin'
_STATUS_DETAILS = 'This is an error detail'
_STATUS_DETAILS_ANOTHER = 'This is another error detail'
async def _ok_unary_unary(request, servicer_context):
return _RESPONSE
async def _not_ok_unary_unary(request, servicer_context):
await servicer_context.abort(grpc.StatusCode.INTERNAL, _STATUS_DETAILS)
async def _error_details_unary_unary(request, servicer_context):
details = any_pb2.Any()
details.Pack(
error_details_pb2.DebugInfo(stack_entries=traceback.format_stack(),
detail='Intentionally invoked'))
rich_status = status_pb2.Status(
code=code_pb2.INTERNAL,
message=_STATUS_DETAILS,
details=[details],
)
await servicer_context.abort_with_status(rpc_status.to_status(rich_status))
async def _inconsistent_unary_unary(request, servicer_context):
rich_status = status_pb2.Status(
code=code_pb2.INTERNAL,
message=_STATUS_DETAILS,
)
servicer_context.set_code(grpc.StatusCode.NOT_FOUND)
servicer_context.set_details(_STATUS_DETAILS_ANOTHER)
# User put inconsistent status information in trailing metadata
servicer_context.set_trailing_metadata(
((_GRPC_DETAILS_METADATA_KEY, rich_status.SerializeToString()),))
async def _invalid_code_unary_unary(request, servicer_context):
rich_status = status_pb2.Status(
code=42,
message='Invalid code',
)
await servicer_context.abort_with_status(rpc_status.to_status(rich_status))
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _STATUS_OK:
return grpc.unary_unary_rpc_method_handler(_ok_unary_unary)
elif handler_call_details.method == _STATUS_NOT_OK:
return grpc.unary_unary_rpc_method_handler(_not_ok_unary_unary)
elif handler_call_details.method == _ERROR_DETAILS:
return grpc.unary_unary_rpc_method_handler(
_error_details_unary_unary)
elif handler_call_details.method == _INCONSISTENT:
return grpc.unary_unary_rpc_method_handler(
_inconsistent_unary_unary)
elif handler_call_details.method == _INVALID_CODE:
return grpc.unary_unary_rpc_method_handler(
_invalid_code_unary_unary)
else:
return None
class StatusTest(AioTestBase):
async def setUp(self):
self._server = aio.server()
self._server.add_generic_rpc_handlers((_GenericHandler(),))
port = self._server.add_insecure_port('[::]:0')
await self._server.start()
self._channel = aio.insecure_channel('localhost:%d' % port)
async def tearDown(self):
await self._server.stop(None)
await self._channel.close()
async def test_status_ok(self):
call = self._channel.unary_unary(_STATUS_OK)(_REQUEST)
# A successful RPC doesn't carry a status
status = await rpc_status.aio.from_call(call)
self.assertIs(status, None)
async def test_status_not_ok(self):
call = self._channel.unary_unary(_STATUS_NOT_OK)(_REQUEST)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
rpc_error = exception_context.exception
self.assertEqual(rpc_error.code(), grpc.StatusCode.INTERNAL)
# A failed RPC doesn't automatically generate a status
status = await rpc_status.aio.from_call(call)
self.assertIs(status, None)
async def test_error_details(self):
call = self._channel.unary_unary(_ERROR_DETAILS)(_REQUEST)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
rpc_error = exception_context.exception
status = await rpc_status.aio.from_call(call)
self.assertEqual(rpc_error.code(), grpc.StatusCode.INTERNAL)
self.assertEqual(status.code, code_pb2.Code.Value('INTERNAL'))
# Check if the underlying proto message is intact
self.assertTrue(status.details[0].Is(
error_details_pb2.DebugInfo.DESCRIPTOR))
info = error_details_pb2.DebugInfo()
status.details[0].Unpack(info)
self.assertIn('_error_details_unary_unary', info.stack_entries[-1])
async def test_code_message_validation(self):
call = self._channel.unary_unary(_INCONSISTENT)(_REQUEST)
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
rpc_error = exception_context.exception
self.assertEqual(rpc_error.code(), grpc.StatusCode.NOT_FOUND)
# Code/Message validation failed
with self.assertRaises(ValueError):
await rpc_status.aio.from_call(call)
async def test_invalid_code(self):
with self.assertRaises(aio.AioRpcError) as exception_context:
await self._channel.unary_unary(_INVALID_CODE)(_REQUEST)
rpc_error = exception_context.exception
self.assertEqual(rpc_error.code(), grpc.StatusCode.UNKNOWN)
# Invalid status code exception raised during conversion
self.assertIn('Invalid status code', rpc_error.details())
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
| {
"content_hash": "b81c639ae7ace7ca48e4f38a1e00caa6",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 36.19135802469136,
"alnum_prop": 0.6735459662288931,
"repo_name": "firebase/grpc",
"id": "980cf5a67e7cf69a6ba8e826d929860c3fe86786",
"size": "6443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests_aio/status/grpc_status_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "35774"
},
{
"name": "C",
"bytes": "3708933"
},
{
"name": "C#",
"bytes": "2162951"
},
{
"name": "C++",
"bytes": "12275592"
},
{
"name": "CMake",
"bytes": "495117"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "169468"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6259"
},
{
"name": "JavaScript",
"bytes": "84355"
},
{
"name": "M4",
"bytes": "69163"
},
{
"name": "Makefile",
"bytes": "1104867"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "696194"
},
{
"name": "Objective-C++",
"bytes": "77574"
},
{
"name": "PHP",
"bytes": "392133"
},
{
"name": "PowerShell",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "3401091"
},
{
"name": "Ruby",
"bytes": "982979"
},
{
"name": "Shell",
"bytes": "532295"
},
{
"name": "Starlark",
"bytes": "554304"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
} |
"""
WSGI Application
This application listens for a form submission. When a form is submitted, it
takes the submitted information, formats it into a Python dictionary, and
emails it to a specified email address.
"""
import os
import smtplib
import werkzeug
import urllib
import hashlib
from urllib import urlencode
from urllib2 import urlopen
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException
from werkzeug.wsgi import SharedDataMiddleware
from jinja2 import Environment, FileSystemLoader
from email.mime.text import MIMEText
from validate_email import validate_email
from datetime import datetime
import logging
import logging.handlers
import conf
import time
import json
class Forms(object):
"""
This class listens for a form submission, checks that the data is valid, and
sends the form data in a formatted message to the email specified in conf.py
"""
def __init__(self, controller, logger):
# Sets up the path to the template files
template_path = os.path.join(os.path.dirname(__file__), 'templates')
self.controller = controller
self.error = None
# Creates jinja template environment
self.jinja_env = Environment(loader=FileSystemLoader(template_path),
autoescape=True)
# When the browser is pointed at the root of the website, call
# on_form_page
self.url_map = Map([
Rule('/', endpoint='form_page'),
Rule('/server-status', endpoint='server_status'),
])
self.logger = logger
def dispatch_request(self, request):
"""Evaluates request to decide what happens"""
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return getattr(self, 'on_' + endpoint)(request, **values)
except HTTPException as error:
self.logger.error('formsender: %s', error)
return error
def wsgi_app(self, environ, start_response):
"""
Starts wsgi_app by creating a Request and Response based on the Request
"""
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def on_server_status(self, request):
"""
Returns an OK on a GET. This is to support health checks by any
monitoring software on this application
"""
if request.method == 'GET':
return Response('OK', status=200)
# Do not process anything else
return Response('', status=400)
def on_form_page(self, request):
"""
Checks for valid form data, calls send_email, returns a redirect
"""
# Increment rate because we received a request
self.controller.increment_rate()
self.error = None
error_number = self.are_fields_invalid(request)
if request.method == 'POST' and error_number:
# Error was found
return self.handle_error(request, error_number)
elif request.method == 'POST':
# No errors
return self.handle_no_error(request)
else:
# Renders error message locally if sent GET request
self.logger.error('formsender: server received unhandled GET '
'request, expected POST request')
return self.error_redirect()
def are_fields_invalid(self, request):
"""
If a field in the request is invalid, sets the error message and returns
the error number, returns False if fields are valid
"""
# Sends request to each error function and returns first error it sees
if not is_valid_email(request):
self.error = 'Invalid Email'
error_number = 1
invalid_option = 'email'
elif not validate_name(request):
self.error = 'Invalid Name'
error_number = 2
invalid_option = 'name'
elif (not (is_hidden_field_empty(request) and
is_valid_token(request)) or
not (is_valid_fields_to_join(request))):
self.error = 'Improper Form Submission'
error_number = 3
invalid_option = 'name'
elif self.controller.is_rate_violation():
self.error = 'Too Many Requests'
error_number = 4
invalid_option = 'name'
elif self.controller.is_duplicate(create_msg(request)):
self.error = 'Duplicate Request'
error_number = 5
invalid_option = 'name'
elif not is_valid_recaptcha(request):
self.error = 'Invalid Recaptcha'
error_number = 6
invalid_option = 'name'
else:
# If nothing above is true, there is no error
return False
# There is an error if it got this far
self.logger.warn('formsender: received %s: %s from %s',
self.error,
request.form[invalid_option],
request.form['email'])
return error_number
def handle_no_error(self, request):
"""
Creates a message and sends an email with no error, then redirects to
provided redirect url
"""
message = create_msg(request)
if message:
self.logger.debug('formsender: name is: %s', message['name'])
self.logger.debug('formsender: sending email from: %s',
message['email'])
# The following are optional fields, so first check that they exist
# in the message
if 'send_to' in message and message['send_to']:
self.logger.debug('formsender: sending email to: %s',
message['send_to'])
if 'mail_from' in message and message['mail_from']:
self.logger.debug('formsender: sending email from: %s',
message['mail_from'])
# Should log full request
self.logger.debug('formsender message: %s', message)
send_email(format_message(message), set_mail_subject(message),
send_to_address(message), set_mail_from(message))
redirect_url = message['redirect']
return werkzeug.utils.redirect(redirect_url, code=302)
else:
return self.error_redirect()
def handle_error(self, request, error_number):
"""Creates error url and redirects with error query"""
error_url = create_error_url(error_number, self.error, request)
return werkzeug.utils.redirect(error_url, code=302)
def error_redirect(self):
"""Renders local error html file"""
self.logger.error('formsender: POST request was empty')
template = self.jinja_env.get_template('error.html')
return Response(template.render(), mimetype='text/html', status=400)
class Controller(object):
"""
Track number of form submissions per second
__init__
set_time_diff
increment_rate
reset_rate
is_rate_violation
"""
def __init__(self):
# Rate variables
self.rate = 0
self.time_diff = 0
self.start_time = datetime.now()
# Duplicate-submission check variables
self.time_diff_hash = 0
self.start_time_hash = datetime.now()
self.hash_list = []
def set_time_diff(self, begin_time):
"""Returns time difference between begin_time and now in seconds"""
time_d = datetime.now() - begin_time
return time_d.seconds
# Rate methods
def increment_rate(self):
"""Increments self.rate by 1"""
self.rate += 1
def reset_rate(self):
"""Reset rate to initial values"""
self.rate = 0
self.start_time = datetime.now()
self.time_diff = 0
def is_rate_violation(self):
"""
Returns False if rate doesn't violate CEILING in 1 second (no violation)
and True otherwise (violation)
"""
self.time_diff = self.set_time_diff(self.start_time)
if self.time_diff < 1 and self.rate > conf.CEILING:
return True
elif self.time_diff > 1:
self.reset_rate()
return False
# Duplicate-submission check methods
def is_duplicate(self, submission):
"""Calculates a hash from a submission and adds it to the hash list"""
# Create a hexidecmal hash of the submission using sha512
init_hash = hashlib.sha512()
init_hash.update(str(submission))
sub_hash = init_hash.hexdigest()
# If the time difference is under the limit in settings, check for a
# duplicate hash in hash_list
if self.check_time_diff_hash():
return self.check_for_duplicate_hash(sub_hash)
# If the time difference is greater than the limit in settings, there is
# no duplicate since hash_list was reset in check_time_diff_hash
return False
def check_time_diff_hash(self):
"""
Checks time_diff_hash for a value greater than DUPL_CHECK_LIM from
conf.py
"""
self.time_diff_hash = self.set_time_diff(self.start_time_hash)
# If time difference is greater than DUPLICATE_CHECK_TIME, reset the
# hash list and time variables
if self.time_diff_hash > (conf.DUPLICATE_CHECK_TIME): # from conf.py
self.reset_hash()
return False
return True
def reset_hash(self):
"""Resets hash_list and hash_times"""
self.hash_list = []
self.time_diff_hash = 0
self.start_time_hash = datetime.now()
def check_for_duplicate_hash(self, sub_hash):
"""
Checks for a duplicate hash in hash_list
Returns True if there is a duplicate and False otherwise
"""
if sub_hash in self.hash_list:
return True
# If there is no duplicate, add hash to the list and return False
self.hash_list.append(sub_hash)
return False
# Standalone/helper functions
def create_app(with_static=True):
"""
Initializes Controller (controller) and Forms (app) objects, pass
controller to app to keep track of number of submissions per minute
"""
# Initiate a logger
logger = logging.getLogger('formsender')
handler = logging.handlers.SysLogHandler(address=conf.LOG_ADDR)
formatter = logging.Formatter('%(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Initiate rate/duplicate controller and application
controller = Controller()
app = Forms(controller, logger)
if with_static:
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/static': os.path.join(os.path.dirname(__file__), 'static')
})
return app
def create_msg(request):
"""Creates the message to be sent in the email"""
message = dict()
if request.method == 'POST':
# Takes the information from the request and puts it into the message
# dict. request.form cannot be returned directly because it is a
# multidict.
for key in request.form:
safe_key = key.encode('utf-8')
safe_value = request.form[key].encode('utf-8')
message[safe_key] = safe_value
# If there is a message, return it, otherwise return None
if message:
message['redirect'] = strip_query(message['redirect'])
return message
return None
return None
def is_valid_email(request):
"""
Check that request.form['email'] is a syntactically valid email address
(MX lookup and RCPT verification are disabled);
return the email if it is valid, False if not
"""
valid_email = validate_email(request.form['email'],
check_mx=False, # DNS resolution is not reliable
verify=False) # disabling RCPT is occasionally used to fight spam
if valid_email:
return valid_email
return False
def is_valid_recaptcha(request):
"""
Check that the reCAPTCHA response is valid
by sending a POST request to google's servers
"""
recaptchaURL = 'https://www.google.com/recaptcha/api/siteverify'
recaptcha_response = request.form['g-recaptcha-response']
secret_key = conf.RECAPTCHA_SECRET
URLParams = urlencode({
'secret': secret_key,
'response': recaptcha_response,
'remote_ip': request.remote_addr,
})
google_response = urlopen(recaptchaURL, URLParams.encode('utf-8')).read()
recaptcha_result = json.loads(google_response)
recaptcha_success = recaptcha_result.get('success', None)
return recaptcha_success
def validate_name(request):
"""
Make sure request has a 'name' field with more than just spaces return
stripped name if true, False if not
"""
name = request.form['name']
if name.strip():
return True
return False
def is_hidden_field_empty(request):
"""Make sure hidden 'last_name' field is empty, return True or False"""
if request.form['last_name'] == "":
return True
return False
def is_valid_token(request):
"""Make sure request's 'token' field matches TOKEN in conf.py"""
if request.form['token'] == conf.TOKEN:
return True
return False
def is_valid_fields_to_join(request):
"""
Make sure that if request has 'fields_to_join' field, that the specified
fields to join exist
"""
if 'fields_to_join' in request.form:
for field in request.form['fields_to_join'].split(','):
if field not in request.form and field != 'date':
return False
return True
def create_error_url(error_number, message, request):
"""Construct error message and append to redirect url"""
values = [('error', str(error_number)), ('message', message)]
query = urllib.urlencode(values)
return request.form['redirect'] + '?' + query
def strip_query(url):
"""Remove query string from a url"""
return url.split('?', 1)[0]
def format_message(msg):
"""Formats a dict (msg) into a nice-looking string"""
# Ignore these fields when writing to formatted message
hidden_fields = ['redirect', 'last_name', 'token', 'op',
'name', 'email', 'mail_subject', 'send_to',
'fields_to_join_name', 'support', 'ibm_power',
'mail_subject_prefix', 'mail_subject_key',
'g-recaptcha-response']
# Contact information goes at the top
f_message = ("Contact:\n--------\n"
"NAME: {0}\nEMAIL: {1}\n"
"\nInformation:\n------------\n"
.format(msg['name'], msg['email']))
# If fields_to_join_name specified, add the key, data to the dictionary
# Otherwise, create fields_to_join key, data and add to dictionary
if 'fields_to_join' in msg:
# handle fields_to_join
fields_to_join = msg['fields_to_join'].split(',') # list of fields
joined_data = (':'.join(str(int(time.time())) if field == 'date' else msg[field] for field in fields_to_join))
# If the fields to join name is specified, and the name does not exist
# as a key in current msg dictionary
if 'fields_to_join_name' in msg and msg['fields_to_join_name'] not in msg:
msg[str(msg['fields_to_join_name'])] = joined_data
else:
msg[str('Fields To Join')] = joined_data
msg.pop('fields_to_join', None)
# Create another dictionary that has lowercase title as key and original
# title as value
titles = {}
for key in msg:
titles[key.lower()] = key
# Write each formatted key in title case and corresponding message to
# f_message, each key and message is separated by two lines.
for key in sorted(titles):
if key not in hidden_fields:
f_message += \
('{0}:\n{1}\n\n'.format(convert_key_to_title(titles[key]),
msg[titles[key]]))
return f_message
def convert_key_to_title(snake_case_key):
"""Replace underscores with spaces and convert to title case"""
return snake_case_key.replace('_', ' ').title()
def set_mail_subject(message):
"""
Returns a string to be used as a subject in an email, format:
message['mail_subject_prefix']: message[message['mail_subject_key']]
or
message['mail_subject_prefix']
or
message[message['mail_subject_key']]
or the default
'Form Submission'
"""
mail_subject = ''
# If mail_subject_prefix exists in the message dict and has content, add
# it to the mail_subject string. Then check if mail_subject_key also exists
# and points to valid data and append if necessary.
if 'mail_subject_prefix' in message and message['mail_subject_prefix']:
mail_subject += message['mail_subject_prefix']
if ('mail_subject_key' in message and
message['mail_subject_key'] and
message['mail_subject_key'] in message and
message[message['mail_subject_key']]):
mail_subject += ": {}".format(message[message['mail_subject_key']])
# If mail_subject_key is in the message and the field it points to exists,
# add it to the mail_subject. It is ok if it is an empty string, because
# it will just be ignored
elif ('mail_subject_key' in message and
message['mail_subject_key'] in message):
mail_subject += message[message['mail_subject_key']]
# Otherwise mail_subject if it has something or the default
return mail_subject if mail_subject else 'Form Submission'
def set_mail_from(message):
"""
Returns a string to be used to fill the 'from' field of an email
If no from address is provided in the html form, return 'from_default'
"""
# If a from address is included in html form, return it
if 'mail_from' in message and message['mail_from']:
return message['mail_from']
# If there is no explicit mail_from, return the user's submitted email
if 'email' in message and message['email']:
return message['email']
# If neither mail_from nor email is available, return from_default
return 'from_default'
def send_to_address(message):
"""
Returns a string to be used as the address the email is being sent to
Default is '[email protected]'
"""
# If a send to address is included in html form, return its assoc. string
if 'send_to' in message and message['send_to']:
return message['send_to']
# Otherwise, return default
return 'default'
def send_email(msg, subject, send_to_email='default',
mail_from='from_default'):
"""Sets up and sends the email"""
# Format the message and set the subject
msg_send = MIMEText(str(msg))
msg_send['Subject'] = subject
msg_send['To'] = conf.EMAIL[send_to_email]
msg_send['Sender'] = conf.SENDER
# print(msg_send)
# Sets up a temporary mail server to send from
smtp = smtplib.SMTP(conf.SMTP_HOST)
# Attempts to send the mail to EMAIL, with the message formatted as a string
try:
if (mail_from != 'from_default'):
smtp.sendmail(mail_from,
conf.EMAIL[send_to_email],
msg_send.as_string())
smtp.quit()
else:
smtp.sendmail(conf.FROM[mail_from],
conf.EMAIL[send_to_email],
msg_send.as_string())
smtp.quit()
except RuntimeError:
smtp.quit()
# Start application
if __name__ == '__main__':
from werkzeug.serving import run_simple
# Creates the app
app = create_app()
# Starts the listener
run_simple(conf.HOST, conf.PORT, app, use_debugger=True, use_reloader=True)
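# Hedged illustration (comments only, not part of the original module):
# format_message() renders a submission dict as the plain-text email body, e.g.
#
#   body = format_message({'name': 'Ada', 'email': 'ada@example.org',
#                          'redirect': 'http://example.org/thanks',
#                          'question': 'How do I sign up?'})
#
# produces a "Contact:" section with the name and email followed by an
# "Information:" section listing the remaining non-hidden fields ("Question:"
# here). The field names and values above are hypothetical.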
| {
"content_hash": "11509dd99c81088d88100813c9ec7960",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 118,
"avg_line_length": 36.46750902527076,
"alnum_prop": 0.6110973617779538,
"repo_name": "osuosl/formsender",
"id": "daf71217595f26a42af12403221b95a877ade281",
"size": "20203",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "request_handler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1770"
},
{
"name": "Makefile",
"bytes": "777"
},
{
"name": "Python",
"bytes": "69091"
}
],
"symlink_target": ""
} |
import os
import sys
import re

def read_config(hosts_file):
    host = []
    host_file = open(hosts_file)
    # Determine whether the first line contains a host group
    for line in host_file:
        if re.search("^#", line) or re.search("^ *$", line):
            continue
        host_info = line.strip().split("===")
        if len(host_info) < 4:
            print "the config file is invalid"
            sys.exit()
        host.append((host_info[0], host_info[1], host_info[2], host_info[3]))
    return host

if __name__ == "__main__":
    hosts_file = "./switch"
    host_info = read_config(hosts_file)
    print host_info
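# Hedged illustration (comments only, not part of the original script):
# read_config() expects each non-comment, non-blank line of the "./switch" file
# to carry at least four "==="-separated fields, for example
#
#   192.0.2.10===fieldB===fieldC===fieldD
#
# Only the first four fields are kept; their meaning is not documented in this
# file, so the sample values above are placeholders.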
| {
"content_hash": "43b2156e05251427136ae78861dfe6ea",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5627177700348432,
"repo_name": "BillWang139967/zabbix_manager",
"id": "b8b532e0171eef00c24b7e89caac14d7621e18c1",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ZabbixTool/lib_zabbix/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Python",
"bytes": "862323"
},
{
"name": "Roff",
"bytes": "10917"
},
{
"name": "Shell",
"bytes": "15269"
}
],
"symlink_target": ""
} |
"""
sentry.web.frontend.projects.keys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.contrib import messages
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext_lazy as _
from sentry.constants import MEMBER_OWNER
from sentry.models import ProjectKey
from sentry.permissions import (
can_remove_project_key, can_add_project_key
)
from sentry.plugins import plugins
from sentry.web.decorators import has_access
from sentry.web.helpers import render_to_response
@has_access(MEMBER_OWNER)
@csrf_protect
def manage_project_keys(request, team, project):
result = plugins.first('has_perm', request.user, 'edit_project', project)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
key_list = list(ProjectKey.objects.filter(
project=project,
).select_related('user', 'user_added').order_by('-id'))
for key in key_list:
key.project = project
key.can_remove = can_remove_project_key(request.user, key)
context = csrf(request)
context.update({
'team': team,
'page': 'keys',
'project': project,
'key_list': key_list,
'can_add_key': can_add_project_key(request.user, project),
})
return render_to_response('sentry/projects/keys.html', context, request)
@has_access(MEMBER_OWNER)
@csrf_protect
def new_project_key(request, team, project):
if not can_add_project_key(request.user, project):
return HttpResponseRedirect(reverse('sentry-manage-project-keys', args=[project.team.slug, project.slug]))
ProjectKey.objects.create(
project=project,
user_added=request.user,
)
return HttpResponseRedirect(reverse('sentry-manage-project-keys', args=[project.team.slug, project.slug]))
@require_http_methods(['POST'])
@has_access(MEMBER_OWNER)
@csrf_protect
def remove_project_key(request, team, project, key_id):
try:
key = ProjectKey.objects.get(id=key_id)
except ProjectKey.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-manage-project-keys', args=[project.team.slug, project.slug]))
if not can_remove_project_key(request.user, key):
return HttpResponseRedirect(reverse('sentry-manage-project-keys', args=[project.team.slug, project.slug]))
key.delete()
messages.add_message(
request, messages.SUCCESS,
_('The API key (%s) was revoked.') % (key.public_key,))
return HttpResponseRedirect(reverse('sentry-manage-project-keys', args=[project.team.slug, project.slug]))
| {
"content_hash": "ae4fa374ca26a45f3848ac1ca9d33c1e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 114,
"avg_line_length": 34.583333333333336,
"alnum_prop": 0.7087779690189329,
"repo_name": "rdio/sentry",
"id": "cb4e9b83afd554f16866f00a6f419a47033b8163",
"size": "2905",
"binary": false,
"copies": "1",
"ref": "refs/heads/rdio_sentry_6.4.4",
"path": "src/sentry/web/frontend/projects/keys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "533425"
},
{
"name": "HTML",
"bytes": "258193"
},
{
"name": "JavaScript",
"bytes": "916843"
},
{
"name": "Makefile",
"bytes": "2982"
},
{
"name": "Python",
"bytes": "2881969"
},
{
"name": "Ruby",
"bytes": "8042"
}
],
"symlink_target": ""
} |
from djangopress.menus.menu import register
from django.template import Template, RequestContext
from .models import GallerySection
class GalleryRender(object):
_menu = Template("""
<ul{% if menu.class_tag%} class="{{ menu.class_tag }}"{% endif %}{% if menu.name %} id="{{ menu.name }}"{% endif %}>
{% for gallery in galleries %}
<li><a href="{{ gallery.get_absolute_url }}">{{ gallery.title }}</a></li>
{% endfor %}
</ul>""")
_item = Template("""
<li {% if item.id_tag %} id="{{ item.id_tag }}" {% endif %}{% if item.class_tag %} class="{{ item.class_tag }}"{% endif %}>
<a href="{{ item.link }}">{{ item.label }}</a>
<ul>
{% if item and item.label %}<li><a href="{{ item.link }}">{{ item.label }}</a></li>{% endif %}
{% for gallery in galleries %}
<li><a href="{{ gallery.get_absolute_url }}">{{ gallery.title }}</a></li>
{% endfor %}
</ul>
</li>""")
def render_menu(self, context, tree, menu=None, renderer=None):
galleries = GallerySection.objects.filter(
listed=True).order_by("position")
return self._menu.render(RequestContext(context.get("request"), {"tree": tree, "galleries": galleries}))
def render_item(self, context, item, sub_menu):
galleries = GallerySection.objects.filter(
listed=True).order_by("position")
return self._item.render(RequestContext(context.get("request"), {"item": item, "galleries": galleries}))
register('gallery', GalleryRender()) | {
"content_hash": "508d4b306cae3fb78b2977190d1b9063",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 135,
"avg_line_length": 44.63157894736842,
"alnum_prop": 0.5294811320754716,
"repo_name": "codefisher/djangopress",
"id": "2753d06bb3b1340358ab30c193d494ec94eaca00",
"size": "1696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangopress/gallery/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37234"
},
{
"name": "HTML",
"bytes": "66474"
},
{
"name": "JavaScript",
"bytes": "14582"
},
{
"name": "Python",
"bytes": "345188"
},
{
"name": "Shell",
"bytes": "813"
}
],
"symlink_target": ""
} |
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import linecache
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
# Temporary global switch determining if we should enable the work-in-progress
# calls to the C API. Currently disabled by default but can be manually enabled
# e.g. in tests. This will be removed once all functionality is supported and
# there's no performance penalty with it enabled.
#
# TODO(skyewm) before we can remove this:
# - functions
# - import_graph_def() incrementally adds inputs to ops (i.e. creates an
# Operation and then calls _add_input()). The current code requires that all
# inputs be specified when creating the Operation (since we call
# TF_FinishOperation()).
# - ops_test.py (and others?) create unregistered op types
# - while loop
# - performance (e.g. delete/refactor redundant Python functionality, switch to
# new session API)
_USE_C_API = False
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _NullContextmanager(object):
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
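# Illustrative sketch (not part of the original module): how an operator
# overload is wired through this helper. `_example_add` is an assumed
# stand-in for the real overloads registered elsewhere in TensorFlow
# (for example, by math_ops); it is defined here only to show the
# registration mechanics and is never called.
def _example_override_add():
  def _example_add(x, y):
    # A real overload would dispatch to an op such as tf.add; returning
    # NotImplemented simply defers to Python's normal dispatch rules.
    return NotImplemented
  _override_helper(Tensor, "__add__", _example_add)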
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
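# Illustrative sketch (not part of the original module): registering a
# hypothetical tensor-like type. `_ExampleDenseWrapper` is an assumed
# example class; the checks above only require that `name` and `dtype`
# are defined as properties. The function is never called here.
def _example_register_dense_tensor_like():
  class _ExampleDenseWrapper(object):
    def __init__(self, name, dtype):
      self._name = name
      self._dtype = dtype
    @property
    def name(self):
      return self._name
    @property
    def dtype(self):
      return self._dtype
  register_dense_tensor_like_type(_ExampleDenseWrapper)
  wrapper = _ExampleDenseWrapper("wrapped:0", dtypes.float32)
  # After registration, instances pass the is_dense_tensor_like() check.
  return is_dense_tensor_like(wrapper)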
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
self._shape = tensor_shape.unknown_shape()
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
# Attributes used for C++ shape inference. Not inspected, only forwarded.
# If set, will be a HandleData object from cpp_shape_inference.proto.
self._handle_data = None
self._id = uid()
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
@{tf.TensorShape}
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
return self._shape
def __iter__(self):
if context.in_graph_mode():
raise TypeError(
"`Tensor` objects are not iterable when eager execution is not "
"enabled. To iterate over this tensor use `tf.map_fn`.")
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self._shape.ndims is not None:
return [dim.value for dim in self._shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self._shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
Args:
shape: A `TensorShape` representing the shape of this tensor.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# TODO(skyewm): call C API
self._shape = self._shape.merge_with(shape)
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
return self._consumers
def _add_consumer(self, consumer):
"""Add a consumer to this tensor.
Args:
consumer: an Operation.
Raises:
TypeError: if the consumer is not an Operation.
"""
if not isinstance(consumer, Operation):
raise TypeError("Consumer must be an Operation: %s" % consumer)
self._consumers.append(consumer)
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
assert self.op._c_op
return c_api_util.tf_output(self.op._c_op, self.value_index)
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name, (", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name)
if self._dtype else "", (", device=%s" % self.device)
if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def _dup(self):
ret = copy.copy(self)
ret._id = uid() # pylint: disable=protected-access
return ret
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
return self.cpu()._numpy() # pylint: disable=protected-access
# __int__ and __float__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __int__(self):
return int(self.numpy())
def __float__(self):
return float(self.numpy())
def __array__(self):
return np.array(self.numpy())
def __format__(self, format_spec):
return self.numpy().__format__(format_spec)
def _numpy(self):
raise NotImplementedError()
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name
raise NotImplementedError()
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self),
self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
# Record the copy on tape and define backprop copy as well.
if not context.in_graph_mode():
self_device = self.device
def grad_fun(dresult):
return [dresult._copy(device_name=self_device)]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
def _dup(self):
return self._copy(device_name=self.device)
@property
def shape(self):
return tensor_shape.TensorShape(self._shape_tuple())
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
      gpu_index: Identifies which GPU the contents of the returned Tensor
        should be placed on.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def __bool__(self):
if self._shape_tuple() != (): # pylint: disable=g-explicit-bool-comparison
raise ValueError(
"Non-scalar tensor %s cannot be converted to boolean." % repr(self))
if self.dtype != dtypes.bool:
raise ValueError(
"Non-boolean tensor %s cannot be converted to boolean." % repr(self))
return bool(self.cpu().numpy())
def __nonzero__(self):
return self.__bool__()
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"EagerTensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError("op not supported for Eager Tensors.")
@property
def graph(self):
raise AttributeError("graph not supported for Eager Tensors.")
@property
def name(self):
raise AttributeError("name not supported for Eager Tensors.")
@property
def value_index(self):
raise AttributeError("value_index not supported for Eager Tensors.")
def consumers(self):
raise NotImplementedError("consumers not supported for Eager Tensors.")
def _add_consumer(self, consumer):
raise NotImplementedError("_add_consumer not supported for Eager Tensors.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported for Eager Tensors.")
def _as_tf_output(self):
raise NotImplementedError("_as_tf_output not supported for Eager Tensors.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError("eval not supported for Eager Tensors.")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]
}
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
register_dense_tensor_like_type(Tensor)
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
    A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
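# Illustrative sketch (not part of the original module): `preferred_dtype`
# is only a soft preference. The values below are assumed for illustration;
# converting a Python float with preferred_dtype=int32 falls back to the
# inferred float32 type because the lossy cast is rejected instead of
# raising. The function is never called here.
def _example_preferred_dtype():
  t = convert_to_tensor(2.5, preferred_dtype=dtypes.int32)
  return t.dtype  # dtypes.float32: the preference could not be honored.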
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts the given `value` to an `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
This function can be useful when composing a new operation in Python
All standard Python op constructors apply this function to each of their
Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
lists, and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the mutable view of Variables, if applicable.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: Optional: The value of context.context().
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
if ctx is None: ctx = context.context()
if ctx.in_eager_mode():
# Fast path for EagerTensors that don't need any conversion.
if isinstance(value, EagerTensor):
# Note that we don't check that value's dtype matches the dtype
      # argument. We expect that the C runtime will do that checking
# when we execute the kernel.
return value
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
unwrapped_type = type(value)
conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
if conversion_func_list is None:
with _tensor_conversion_func_lock:
conversion_func_list = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
conversion_func_list.append((base_type, conversion_func))
_tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
for base_type, conversion_func in conversion_func_list:
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError, errors.UnimplementedError,
errors.InvalidArgumentError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, Tensor):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, unwrapped_type))
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
if ctx is None: ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
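# Illustrative sketch (not part of the original module): converting a list
# of Python values with a name prefix; as documented above, element i is
# given the name "<name>_<i>". The values, dtype, and prefix are assumed
# for illustration, and the function is never called here.
def _example_convert_n_to_tensor():
  tensors = internal_convert_n_to_tensor([1.0, 2.0], dtype=dtypes.float32,
                                         name="example")
  # In graph mode each element i is typically named "example_%d:0" % i.
  return tensors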
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to an `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
    name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
Defaults to 100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
global _tensor_conversion_func_cache
with _tensor_conversion_func_lock:
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple) and
all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
_tensor_conversion_func_cache = {}
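# Illustrative sketch (not part of the original module): registering a
# conversion function for a hypothetical wrapper type. The class and
# function names are assumed for illustration; the conversion function
# follows the signature documented above and simply unwraps the value
# before deferring to convert_to_tensor(). Never called here.
def _example_register_conversion_function():
  class _WrappedValue(object):
    def __init__(self, value):
      self.value = value
  def _wrapped_conversion(value, dtype=None, name=None, as_ref=False):
    _ = as_ref  # Plain values have no ref view; ignore the flag.
    return convert_to_tensor(value.value, dtype=dtype, name=name)
  register_tensor_conversion_function(_WrappedValue, _wrapped_conversion)
  # After registration, convert_to_tensor(_WrappedValue([1, 2, 3])) succeeds
  # by unwrapping the value first.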
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. @{tf.gather}).
Contrast this representation with
@{tf.SparseTensor},
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values, (", dense_shape=%s" % self._dense_shape)
if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
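# Illustrative sketch (not part of the original module): the dense tensor
# described by an IndexedSlices can be reconstructed row by row, mirroring
# dense[slices.indices[i], ...] = slices.values[i, ...] from the class
# docstring. Plain numpy arrays and assumed example shapes are used for
# clarity; the function is never called here.
def _example_indexed_slices_to_dense():
  values = np.array([[1., 2.], [3., 4.]])  # shape [D0, D1] with D0 == 2
  indices = np.array([0, 3])               # rows selected from the dense tensor
  dense_shape = (5, 2)                     # shape [LARGE0, D1]
  dense = np.zeros(dense_shape)
  for i, row in enumerate(indices):
    dense[row, :] = values[i, :]
  return dense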
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string.
Value for the "device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
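# Illustrative sketch (not part of the original module): building a NodeDef
# for a hypothetical op with one integer attr. The op type, node name,
# device string, and attr are assumed for illustration; never called here.
def _example_node_def():
  attr = attr_value_pb2.AttrValue(i=7)
  node = _NodeDef("ExampleOp", "example_node", device="/cpu:0",
                  attrs={"some_attr": attr})
  # node.op == "ExampleOp", node.device == "/cpu:0", and the attr map now
  # carries "some_attr" with integer value 7.
  return node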
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
with errors.raise_exception_on_not_ok_status() as status:
c_api.TF_SetAttrValueProto(op_desc,
compat.as_str(name), serialized, status)
with errors.raise_exception_on_not_ok_status() as status:
c_op = c_api.TF_FinishOperation(op_desc, status)
return c_op
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
`Tensors` computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
input_types: List of `DType` objects representing the
types of the tensors accepted by the `Operation`. By default
uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
reference-typed inputs must specify these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
op type that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# except `control_inputs` should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
self._node_def = copy.deepcopy(node_def)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert input_types is None
assert original_op is None
assert op_def is None
self._node_def = None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
self._inputs = list(inputs) # Defensive copy.
for a in self._inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in self._inputs]
else:
if not all(
x.is_compatible_with(i.dtype)
for i, x in zip(self._inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(self.node_def.name, [i.dtype for i in self._inputs],
input_types))
self._input_types_val = input_types
# Build the list of control inputs.
self._control_inputs = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
self._control_inputs.append(control_op)
self._original_op = original_op
self._op_def = op_def
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
# Initialize self._c_op.
if c_op:
# TODO(skyewm): remove this assert when we remove USE_C_API
assert self._graph._c_graph # pylint: disable=protected-access
self._c_op = c_op
self._add_control_inputs(self._control_inputs)
elif self._graph._c_graph: # pylint: disable=protected-access
if self._op_def:
# TODO(skyewm): op_def_library.apply_op() flattens the incoming
# inputs. Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
self._op_def, self._inputs, self._node_def.attr)
else:
# If no OpDef is specified, assume all inputs are scalar.
grouped_inputs = self._inputs
self._c_op = _create_c_op(self._graph, self._node_def, grouped_inputs,
self._control_inputs)
else:
self._c_op = None
# Mark that we consume the inputs.
for input_tensor in self.inputs:
input_tensor._add_consumer(self) # pylint: disable=protected-access
# Initialize self._outputs.
if self._c_op:
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
for i in range(num_outputs)]
assert output_types is not None
elif output_types is None:
output_types = []
self._output_types_val = output_types
self._outputs = [
Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)
]
# Add this op to the current control flow context.
self._control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
# NOTE(keveman): Control flow context's AddOp could be creating new ops and
# setting op.inputs[index] = new_op. Thus the new ops' id could be larger
    # than this op's id even though this op depends on them. Therefore, delaying
# assigning id to this op until all ops this could be dependent on are
# created.
self._id_value = self._graph._next_id() # pylint: disable=protected-access
self._recompute_node_def()
self._graph._add_op(self) # pylint: disable=protected-access
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
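  # Illustrative worked example (comment only, not part of the original
  # module): for an op whose OpDef declares inputs `(x: T, ys: N * T)` with
  # attr N = 3, the flat input list [a, b, c, d] is regrouped above into
  # [a, [b, c, d]]: the scalar input stays a single element and the
  # sequence input becomes a sub-list. The op signature and values are
  # assumed for illustration.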
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [
compat.as_bytes("loc:@%s" % self.name)
]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
if self._c_op:
return c_api.TF_OperationName(self._c_op)
else:
return self._node_def.name
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
if self._c_op:
return c_api.TF_OperationDevice(self._c_op)
else:
return self._node_def.device
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
if self._c_op:
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# TODO(iga): Remove this assert after converting to C API by default.
# Just being a bit paranoid here.
assert self._output_types_val == output_types
      # In all the tests we have, the output_types passed into
      # Operation.__init__ are a list of ints (which is illegal according
# to the docstring), but input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
else:
return self._output_types_val
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
assert self._c_op
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
assert self._c_op
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
      device: string or device. The device to set.
"""
if self._c_op:
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
_device_string(device))
else:
self._node_def.device = _device_string(device)
def _add_input(self, tensor, dtype=None):
"""Add a new input to this operation.
Args:
tensor: the Tensor to add as an input.
dtype: tf.DType: type of the input; defaults to
the tensor's dtype.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
assert not self._c_op, (
"Operation._add_input doesn't work with C API")
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if dtype is None:
dtype = tensor.dtype
else:
dtype = dtypes.as_dtype(dtype)
if not dtype.is_compatible_with(tensor.dtype):
raise TypeError(
"Cannot convert a tensor of type %s to an input of type %s" %
(tensor.dtype.name, dtype.name))
self._inputs.append(tensor)
self._input_types_val.append(dtype)
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
if self._c_op:
with errors.raise_exception_on_not_ok_status() as status:
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index),
status)
else:
self._inputs[index].consumers().remove(self)
self._inputs[index] = tensor
self._input_types_val[index] = tensor.dtype
tensor._add_consumer(self) # pylint: disable=protected-access
self._recompute_node_def()
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
if self._c_op:
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
else:
if ops:
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
_assert_same_graph(self, op)
self._control_inputs.append(op)
self._recompute_node_def()
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if self._c_op:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
else:
self._add_control_inputs([op])
# Methods below are used when building the NodeDef and Graph proto.
def _recompute_node_def(self):
# TODO(skyewm): remove this function when we switch to C API
if self._c_op: return
del self._node_def.input[:]
# pylint: disable=protected-access
self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])
# pylint: enable=protected-access
if self._control_inputs:
self._node_def.input.extend(
["^%s" % op.name for op in self._control_inputs])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, op):
self._op = op
def __iter__(self):
return iter(self._op._inputs)
def __len__(self):
return len(self._op._inputs)
def __bool__(self):
return bool(self._op._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._op._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._c_op:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# TODO(skyewm): return Operation._InputList
# pylint: disable=protected-access
return [self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs]
# pylint: enable=protected-access
else:
return Operation._InputList(self)
@property
def _input_dtypes(self):
return self._input_types
@property
def _input_types(self):
if self._c_op:
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
else:
return self._input_types_val
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
if self._c_op:
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
c_api.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
else:
return self._control_inputs
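  # Editor's illustrative sketch (not part of the library): how control inputs
  # typically arise through the public `tf` namespace (assumed here, since it
  # is not imported in this module); `tf.control_dependencies` and `tf.no_op`
  # are standard TF APIs.
  #
  #   init_op = tf.no_op(name="init")
  #   with tf.control_dependencies([init_op]):
  #     ready_op = tf.no_op(name="ready")
  #   assert init_op in ready_op.control_inputs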
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
if self._c_op:
op_type = c_api.TF_OperationOpType(self._c_op)
return op_type
else:
return self._node_def.op
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
if self._c_op:
with c_api_util.tf_buffer() as buf:
with errors.raise_exception_on_not_ok_status() as status:
c_api.TF_OperationToNodeDef(self._c_op, buf, status)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
else:
return self._node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
if self._c_op:
with c_api_util.tf_buffer() as buf:
with errors.raise_exception_on_not_ok_status() as status:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._graph._c_graph,
compat.as_bytes(self.type), buf, status)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
return op_def
else:
return self._op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return self._graph._convert_stack( # pylint: disable=protected-access
self._traceback,
include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
if _USE_C_API:
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
with errors.raise_exception_on_not_ok_status() as status:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf,
status)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
else:
self._node_def.attr[attr_name].CopyFrom(attr_value)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
if self._c_op:
try:
with c_api_util.tf_buffer() as buf:
with errors.raise_exception_on_not_ok_status() as status:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf, status)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
else:
if name not in self._node_def.attr:
raise ValueError(
"No attr named '" + name + "' in " + str(self._node_def))
x = self._node_def.attr[name]
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
if f == "type":
return dtypes.as_dtype(getattr(x, f))
else:
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run}
for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run this operation. If
        none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
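# Editor's illustrative sketch (not part of the library): typical use of
# `Operation.get_attr()` and `Operation.run()` through the public `tf`
# namespace (assumed here, since it is not imported in this module).
#
#   c = tf.constant(1.0, name="c")
#   assert c.op.get_attr("dtype") == tf.float32   # attrs come back as Python
#                                                 # objects (here a DType)
#   with tf.Session():
#     c.op.run()   # executes the Const op in the default session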
_gradient_registry = registry.Registry("gradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
def NotDifferentiable(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.NotDifferentiable("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Alias for the old name; it will eventually be removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
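# Editor's illustrative sketch (not part of the library): registering a
# gradient for a hypothetical op type ("ExampleIdentity" is made up) and
# looking it up in the same registry that `get_gradient_function()` consults.
# The helper below is never called, so it has no side effects at import time.
def _example_gradient_lookup():
  @RegisterGradient("ExampleIdentity")
  def _example_identity_grad(unused_op, grad):
    # An identity-like op simply passes its output gradient through.
    return grad
  assert _gradient_registry.lookup("ExampleIdentity") is _example_identity_grad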
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used. Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shapes_for_outputs(op):
"""Uses the registered shape functions to set the shapes for op's outputs."""
try:
shape_func = _shape_registry.lookup(op.type)
except LookupError:
try:
shape_func = _default_shape_function_registry.lookup(op.type)
except LookupError:
shape_func = _call_cpp_shape_fn_and_require_op
shapes = shape_func(op)
if shapes is None:
raise RuntimeError(
"Shape function for op %s did not return any shapes" % op)
elif isinstance(shapes, dict):
# Returned by call_cpp_shape_fn
shapes_dict = shapes
shapes = shapes_dict["shapes"]
handle_datas = shapes_dict["handle_data"]
for output, handle_data in zip(op.outputs, handle_datas):
# pylint: disable=protected-access
output._handle_data = handle_data
# pylint: enable=protected-access
if len(op.outputs) != len(shapes):
raise RuntimeError(
"Shape function for op %s returned %d shapes but expected %d %s %s" %
(op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
for output, s in zip(op.outputs, shapes):
output.set_shape(s)
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
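# Editor's illustrative sketch (not part of the library): accumulating OpStats
# of the same statistic type with `+=`. The numbers are made up, and the
# helper below is never called.
def _example_op_stats_accumulation():
  total = OpStats("flops", 0)
  total += OpStats("flops", 128)
  total += OpStats("flops", 64)
  assert total.value == 192
  # Adding an OpStats with a different statistic type raises ValueError.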
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
  Since the statistics are counted on a per-op basis, they are not suitable
  for model parameters (capacity), which are expected to be counted only once,
  even if they are shared by multiple ops (e.g. in an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
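# Editor's illustrative sketch (not part of the library) of the trailing-slash
# handling above; the helper below is never called.
def _example_name_from_scope_name():
  assert _name_from_scope_name("layer1/") == "layer1"
  assert _name_from_scope_name("layer1") == "layer1"
  assert _name_from_scope_name("") == ""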
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
@{tf.Graph.as_default}
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects the core state that may be accessed by multiple readers.
# Only state that can be returned via public accessors (`as_graph_def()`,
# `get_operations()`, `as_graph_element()`, `get_collection()`, and
    # `get_collection_ref()`) is protected by the lock. Thread-safety is
    # provided on a
# best-effort basis to support buggy programs, and is not guaranteed by the
# public `tf.Graph` API.
# NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads.
self._lock = threading.Lock()
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Current name stack: uniquified names
self._name_stack = ""
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
# Functions that will be applied to choose a device if none is specified.
self._device_function_stack = []
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
self._control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops
self._colocation_stack = []
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Resource container.
if context.in_graph_mode():
self._container_prefix = ""
else:
# In Eager mode, isolate resources (particularly ResourceVariables) in
# Graphs by default. This prevents unintended variable sharing. Graph mode
# gets this kind of isolation from Sessions.
self._container_prefix = "eager-execution-%d/" % (uid(),)
self._container = self._container_prefix
self._registered_ops = op_def_registry.get_registered_ops()
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
if _USE_C_API:
self._scoped_c_graph = c_api_util.ScopedTFGraph()
else:
self._scoped_c_graph = None
def _convert_stack(self, stack, include_func_start_lineno=False):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 5-tuples,
(filename, lineno, name, frame_globals, func_start_lineno).
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno,
unused_frame_info) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
def _extract_stack(self):
"""A lightweight, extensible re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Derived classes can implement _extract_frame_info() to add extra information
to the traceback.
Returns:
A list of 6-tuples
(filename, lineno, name, frame_globals, func_start_lineno, custom_info)
corresponding to the call stack of the current thread.
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
func_start_lineno = co.co_firstlineno
frame_info = self._extract_frame_info(f)
ret.append((filename, lineno, name, frame_globals, func_start_lineno,
frame_info))
f = f.f_back
ret.reverse()
return ret
def _extract_frame_info(self, frame): # pylint: disable=unused-argument
"""Extracts custom information from a frame in an op traceback."""
return None
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
      RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
      op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
if self._c_graph:
with c_api_util.tf_buffer() as buf:
with errors.raise_exception_on_not_ok_status() as status:
c_api.TF_GraphVersions(self._c_graph, buf, status)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
else:
return self._graph_def_versions
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
graph = graph_pb2.GraphDef()
graph.versions.CopyFrom(self._graph_def_versions)
bytesize = 0
for op_id in sorted(self._nodes_by_id):
op = self._nodes_by_id[op_id]
if from_version is None or op_id > from_version:
graph.node.extend([op.node_def])
if op.outputs and add_shapes:
assert "_output_shapes" not in graph.node[-1].attr
graph.node[-1].attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
bytesize += op.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
if self._functions:
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph.library.gradient.extend([grad_def])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
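  # Editor's illustrative sketch (not part of the library): serializing a
  # small graph. `tf.constant` refers to the public `tf` namespace, which is
  # assumed here and not imported in this module.
  #
  #   g = Graph()
  #   with g.as_default():
  #     c = tf.constant(1.0, name="c")
  #   graph_def = g.as_graph_def()
  #   assert any(node.name == "c" for node in graph_def.node)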
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call to the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
if self._c_graph:
assert function._c_func, (
"Cannot add function created without C API support to graph "
"created with C API support")
with errors.raise_exception_on_not_ok_status() as status:
gradient = function._grad_func._c_func if function._grad_func else None
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func, gradient,
status)
else:
# If there is already a function with the same name, raise an error
# if bodies are different. Else, do nothing. The C API version above
# has the same behavior.
previous = self._functions.get(name, None)
if previous:
# This check is not ideal as we can have a hash collision with only
# 32 bits in the hash, but the non C API mode is being deprecated.
# Don't bother changing it now.
if previous._hash_str == function._hash_str:
return
else:
raise ValueError("Cannot add function (%s, hash %s) to graph (%s). "
"Another function (%s, hash %s) is already defined "
"with that name (%s)" % (
function, function._hash_str, self,
previous, previous._hash_str, name))
# pylint: enable=protected-access
self._functions[name] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
def create_op(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) If True, shape inference will be performed
to compute the shapes of the outputs.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
    # If a name ends with a '/' it is a "name scope", and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
if compute_shapes:
set_shapes_for_outputs(ret)
self._create_op_helper(ret, compute_device=compute_device)
return ret
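  # Editor's illustrative sketch (not part of the library): calling the
  # low-level create_op() directly with a "NoOp", which takes no inputs,
  # produces no outputs, and needs no attrs. This assumes a normal TensorFlow
  # program where the standard op definitions and shape functions are loaded;
  # most callers should use the Python op constructors instead.
  #
  #   g = Graph()
  #   no_op = g.create_op("NoOp", inputs=[], dtypes=[], name="my_no_op")
  #   assert no_op.type == "NoOp"
  #   assert g.get_operation_by_name("my_no_op") is no_op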
def _create_op_from_tf_operation(self, c_op):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
Args:
c_op: a wrapped TF_Operation
Returns:
An `Operation` object.
"""
self._check_not_finalized()
tf_outputs = c_api.GetOperationInputs(c_op)
input_ops = set(self._get_operation_by_tf_operation(output.oper)
for output in tf_outputs)
control_inputs = self._control_dependencies_for_inputs(input_ops)
ret = Operation(c_op, self, control_inputs=control_inputs)
self._create_op_helper(ret)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" % (key,
value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack:
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to provide
# consistency between the device and the colocation property.
if (op.device and pydev.canonical_name(op.device) !=
pydev.canonical_name(colocation_op.device)):
logging.warning("Tried to colocate %s with an op %s that had "
"a different device: %s vs %s. "
"Ignoring colocation property.", op.name,
colocation_op.name, op.device,
colocation_op.device)
else:
op._set_device(colocation_op.device) # pylint: disable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr("_class", attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if (self._container and op.type in self._registered_ops and
self._registered_ops[op.type].is_stateful):
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
      TypeError: If `obj` is of a type that cannot be converted to an
        `Operation` or `Tensor`.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." % (repr(name), repr(op_name),
len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__,
types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
    to the list such as inserts/deletes have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
    This is an internal unsafe version of get_operation_by_name. It skips many
    checks and does not have user-friendly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
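  # Editor's illustrative sketch (not part of the library): tensor names have
  # the form "<op_name>:<output_index>". Assumes the public `tf` namespace.
  #
  #   c = tf.constant(1.0, name="c")
  #   g = c.graph
  #   assert g.get_tensor_by_name("c:0") is c
  #   assert g.get_operation_by_name("c") is c.op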
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly. Use this method with the `with` keyword
to specify that ops created within the scope of a block should be
added to this graph.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
_assert_collection_is_ok(name)
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
_assert_collection_is_ok(name)
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
    This is different from `get_collection_ref()`, which always returns the
    actual collection list if it exists: this method returns a new list each
    time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
_assert_collection_is_ok(name)
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
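  # Editor's illustrative sketch (not part of the library): collections are
  # plain per-graph lists keyed by name.
  #
  #   g = Graph()
  #   g.add_to_collection("my_things", "a")
  #   g.add_to_collection("my_things", "a")      # duplicates are allowed
  #   assert g.get_collection("my_things") == ["a", "a"]
  #   ref = g.get_collection_ref("my_things")
  #   ref.append("b")                            # mutates the collection
  #   assert g.get_collection("my_things") == ["a", "a", "b"]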
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
    it was based. For example, a replica op is based on the op that was
    replicated, and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
try:
self._default_original_op = op
yield
finally:
self._default_original_op = old_original_op
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
r"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
try:
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = _name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
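For example (an illustrative sketch; the exact suffixes depend on which
names have already been used in this graph):
```python
g = tf.Graph()
assert g.unique_name("foo") == "foo"
assert g.unique_name("foo") == "foo_1"  # "foo" was marked as used above.
assert g.unique_name("bar", mark_as_used=False) == "bar"  # Only peeks.
assert g.unique_name("bar") == "bar"  # Still unused, so no suffix is added.
```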
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
i = self._names_in_use.get(name, 0)
# Increment the number for "name".
if mark_as_used:
self._names_in_use[name] = i + 1
if i > 0:
base_name = name
# Make sure the composed name is not already used.
while name in self._names_in_use:
name = "%s_%d" % (base_name, i)
i += 1
# Mark the composed name as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name] = 1
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within
the context, rather than applying all colocation properties
on the stack. If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
if op is not None and not isinstance(op, Operation):
# We always want to colocate with the reference op.
op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = []
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = []
if op is not None:
self._colocation_stack.append(op)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in
the context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
"""
# pylint: enable=line-too-long
if (device_name_or_function is not None and
not callable(device_name_or_function)):
device_function = pydev.merge_device(device_name_or_function)
else:
device_function = device_name_or_function
try:
self._device_function_stack.append(device_function)
yield
finally:
self._device_function_stack.pop()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in reverse order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
for device_function in reversed(self._device_function_stack):
if device_function is None:
break
op._set_device(device_function(op)) # pylint: disable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
try:
self._container = self._container_prefix + container_name
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument lists control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition
to the current control dependencies. None to indicate that
the dependencies should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs = []
self._new_stack = True
else:
self._control_inputs = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
if isinstance(c, IndexedSlices):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to
AttrValue protocol buffers or None.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to
kernel label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op
type strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
@{tf.Graph.device}
for more details.
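For example (a minimal sketch; the device string is only a placement
request and may be overridden as described in @{tf.Graph.device}):
```python
with tf.device("/cpu:0"):
  a = tf.constant(1.0)  # Requested placement: CPU 0.
```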
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.in_graph_mode():
return get_default_graph().device(device_name_or_function)
else:
# TODO(agarwal): support device functions in EAGER mode.
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def colocate_with(op, ignore_existing=False):
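"""Wrapper for `Graph.colocate_with()` using the default graph.
In eager mode there is no graph, so this falls back to a `device()` scope
for `op` (or to a no-op context manager when `op` is None).
"""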
if context.in_graph_mode():
return get_default_graph().colocate_with(op, ignore_existing)
else:
if op is not None:
return device(op.device)
else:
return _NullContextmanager()
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See @{tf.Graph.control_dependencies}
for more details.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.in_graph_mode():
return get_default_graph().control_dependencies(control_inputs)
else:
return _NullContextmanager()
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
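For example (a minimal sketch):
```python
sess = tf.Session()
with sess.as_default():
  assert tf.get_default_session() is sess
```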
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
_default_graph_stack = _DefaultGraphStack()
def enable_eager_execution(config=None, device_policy=None):
"""Enables, for the rest of the lifetime of this program, eager execution.
If it is not called immediately on program startup, it risks creating breakage
and bugs.
Example:
```python
tfe.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and `Tensor`s hold concrete values, which can be accessed as
# `numpy.ndarray`s through the `numpy()` method.
assert tf.multiply(6, 7).numpy() == 42
```
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
Valid values:
tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not
correct.
tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
Raises:
ValueError: If trying to create a context after using graph operations
or if trying to create a context with nontrivial options which differ
from those of the existing context.
"""
# pylint: disable=protected-access
if context._default_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_session_stack.stack or
_default_graph_stack._global_default_graph is not None)
if graph_mode_has_been_used:
raise ValueError(
"tfe.enable_eager_execution has to be called at program startup.")
context._default_mode = context.EAGER_MODE
if context._context is None:
context._context = context.Context(config=config,
device_policy=device_policy)
elif ((config is not None and config is not context._context._config)
or (device_policy is not None
and device_policy is not context._context._device_policy)):
raise ValueError("Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s; specified device"
" policy: %s." % (config, context._context._config,
device_policy,
context._context._device_policy))
else:
raise ValueError(
"tfe.enable_eager_execution has to be called at program startup.")
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
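For example (a minimal sketch; only valid while no nested graph, session, or
previously created graph elements are in use):
```python
g = tf.get_default_graph()
tf.reset_default_graph()
assert tf.get_default_graph() is not g  # A fresh global default graph.
```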
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
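For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  assert tf.get_default_graph() is g
```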
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." % (item,
original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from the
# graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
@{tf.global_variables}
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporary variables, such as counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
@{tf.trainable_variables}
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
@{tf.summary.merge_all}
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
@{tf.train.start_queue_runners}
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
@{tf.moving_average_variables}
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
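For example (a small sketch of the default collections that a `tf.Variable`
joins in graph mode):
```python
v = tf.Variable(1.0)
assert v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assert v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
```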
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
def VARIABLES(cls): # pylint: disable=no-self-argument
logging.log_first_n(logging.WARN,
"VARIABLES collection name is deprecated, please use "
"GLOBAL_VARIABLES instead; VARIABLES will be removed "
"after 2017-03-02.", 1)
return cls.GLOBAL_VARIABLES
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See @{tf.Graph.add_to_collection}
for more details.
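For example (a minimal sketch; the collection name is illustrative):
```python
c = tf.constant(1.0)
tf.add_to_collection("my_collection", c)
assert tf.get_collection("my_collection") == [c]
```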
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See @{tf.Graph.add_to_collections}
for more details.
Args:
names: The key for the collections. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See @{tf.Graph.get_collection_ref}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See @{tf.Graph.get_collection}
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches `scope` using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied. The
choice of `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
@{tf.Graph.name_scope}
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
"""
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.in_eager_mode()
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._in_eager_mode:
self._old_name = self._ctx.scope_name
if self._name:
scope_name = (self._old_name + self._name + "/"
if self._old_name else self._name + "/")
else:
scope_name = ""
self._ctx.scope_name = scope_name
return scope_name
else:
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (i.e. with values=None) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
if self._values is None:
self._values = []
g = _get_graph_from_inputs(self._values)
self._g_manager = g.as_default()
self._g_manager.__enter__()
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
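For example (an illustrative sketch; names are hypothetical):
```python
strip_name_scope("scope_a/scope_b/my_op", "scope_a")  # -> "scope_b/my_op"
strip_name_scope("scope_b/my_op", None)  # -> "scope_b/my_op" (unchanged)
```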
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts a protocol buffer into a Python object, and
returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
`variable_pb2.VariableDef`, or `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _assert_collection_is_ok(collection_name):
if context.in_eager_mode():
if collection_name in GraphKeys._VARIABLE_COLLECTIONS: # pylint: disable=protected-access
raise ValueError("When Eager Execution is enabled, variable "
"collections are not supported.")
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype,
name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
| {
"content_hash": "472154c86ea29794553ad2e3ace6ccee",
"timestamp": "",
"source": "github",
"line_count": 5380,
"max_line_length": 115,
"avg_line_length": 35.48048327137546,
"alnum_prop": 0.655824187337926,
"repo_name": "guschmue/tensorflow",
"id": "dc4ffb174782d6148594e4ca8f1078640ef7905e",
"size": "191574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8535"
},
{
"name": "C",
"bytes": "314452"
},
{
"name": "C++",
"bytes": "34310628"
},
{
"name": "CMake",
"bytes": "211937"
},
{
"name": "Go",
"bytes": "1012495"
},
{
"name": "Java",
"bytes": "533607"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44807"
},
{
"name": "Objective-C",
"bytes": "12460"
},
{
"name": "Objective-C++",
"bytes": "94483"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "30064150"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "402121"
}
],
"symlink_target": ""
} |
from pyarrow.compat import unittest
import pyarrow as arrow
A = arrow
class TestTypes(unittest.TestCase):
def test_integers(self):
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']
for name in dtypes:
factory = getattr(arrow, name)
t = factory()
assert str(t) == name
def test_list(self):
value_type = arrow.int32()
list_type = arrow.list_(value_type)
assert str(list_type) == 'list<item: int32>'
def test_string(self):
t = arrow.string()
assert str(t) == 'string'
def test_field(self):
t = arrow.string()
f = arrow.field('foo', t)
assert f.name == 'foo'
assert f.nullable
assert f.type is t
assert repr(f) == "Field('foo', type=string)"
f = arrow.field('foo', t, False)
assert not f.nullable
def test_schema(self):
fields = [
A.field('foo', A.int32()),
A.field('bar', A.string()),
A.field('baz', A.list_(A.int8()))
]
sch = A.schema(fields)
assert len(sch) == 3
assert sch[0].name == 'foo'
assert sch[0].type == fields[0].type
assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>"""
def test_schema_equals(self):
fields = [
A.field('foo', A.int32()),
A.field('bar', A.string()),
A.field('baz', A.list_(A.int8()))
]
sch1 = A.schema(fields)
print(dir(sch1))
sch2 = A.schema(fields)
assert sch1.equals(sch2)
del fields[-1]
sch3 = A.schema(fields)
assert not sch1.equals(sch3)
| {
"content_hash": "a94a51069dfb1618b43293006f25c1e2",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 56,
"avg_line_length": 24.82857142857143,
"alnum_prop": 0.5097813578826237,
"repo_name": "TheNeuralBit/arrow",
"id": "507ebb878d87b866dbfda66e8e6c9d79b3824116",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/test_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11203"
},
{
"name": "C++",
"bytes": "779145"
},
{
"name": "CMake",
"bytes": "130695"
},
{
"name": "FreeMarker",
"bytes": "2404"
},
{
"name": "Java",
"bytes": "906110"
},
{
"name": "Makefile",
"bytes": "8192"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "316662"
},
{
"name": "Shell",
"bytes": "27802"
}
],
"symlink_target": ""
} |
"""
This example demonstrates the use of pyqtgraph's parametertree system. This provides
a simple way to generate user interfaces that control sets of parameters. The example
demonstrates a variety of different parameter types (int, float, list, etc.)
as well as some customized parameter types
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
app = QtGui.QApplication([])
import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
## test subclassing parameters
## This parameter automatically generates two child parameters which are always reciprocals of each other
class ComplexParameter(pTypes.GroupParameter):
def __init__(self, **opts):
opts['type'] = 'bool'
opts['value'] = True
pTypes.GroupParameter.__init__(self, **opts)
self.addChild({'name': 'A = 1/B', 'type': 'float', 'value': 7, 'suffix': 'Hz', 'siPrefix': True})
self.addChild({'name': 'B = 1/A', 'type': 'float', 'value': 1/7., 'suffix': 's', 'siPrefix': True})
self.a = self.param('A = 1/B')
self.b = self.param('B = 1/A')
self.a.sigValueChanged.connect(self.aChanged)
self.b.sigValueChanged.connect(self.bChanged)
def aChanged(self):
self.b.setValue(1.0 / self.a.value(), blockSignal=self.bChanged)
def bChanged(self):
self.a.setValue(1.0 / self.b.value(), blockSignal=self.aChanged)
## test add/remove
## this group includes a menu allowing the user to add new parameters into its child list
class ScalableGroup(pTypes.GroupParameter):
def __init__(self, **opts):
opts['type'] = 'group'
opts['addText'] = "Add"
opts['addList'] = ['str', 'float', 'int']
pTypes.GroupParameter.__init__(self, **opts)
def addNew(self, typ):
val = {
'str': '',
'float': 0.0,
'int': 0
}[typ]
self.addChild(dict(name="ScalableParam %d" % (len(self.childs)+1), type=typ, value=val, removable=True, renamable=True))
params = [
{'name': 'Basic parameter data types', 'type': 'group', 'children': [
{'name': 'Integer', 'type': 'int', 'value': 10},
{'name': 'Float', 'type': 'float', 'value': 10.5, 'step': 0.1},
{'name': 'String', 'type': 'str', 'value': "hi"},
{'name': 'List', 'type': 'list', 'values': [1,2,3], 'value': 2},
{'name': 'Named List', 'type': 'list', 'values': {"one": 1, "two": "twosies", "three": [3,3,3]}, 'value': 2},
{'name': 'Boolean', 'type': 'bool', 'value': True, 'tip': "This is a checkbox"},
{'name': 'Color', 'type': 'color', 'value': "FF0", 'tip': "This is a color button"},
{'name': 'Gradient', 'type': 'colormap'},
{'name': 'Subgroup', 'type': 'group', 'children': [
{'name': 'Sub-param 1', 'type': 'int', 'value': 10},
{'name': 'Sub-param 2', 'type': 'float', 'value': 1.2e6},
]},
{'name': 'Text Parameter', 'type': 'text', 'value': 'Some text...'},
{'name': 'Action Parameter', 'type': 'action'},
]},
{'name': 'Numerical Parameter Options', 'type': 'group', 'children': [
{'name': 'Units + SI prefix', 'type': 'float', 'value': 1.2e-6, 'step': 1e-6, 'siPrefix': True, 'suffix': 'V'},
{'name': 'Limits (min=7;max=15)', 'type': 'int', 'value': 11, 'limits': (7, 15), 'default': -6},
{'name': 'DEC stepping', 'type': 'float', 'value': 1.2e6, 'dec': True, 'step': 1, 'siPrefix': True, 'suffix': 'Hz'},
]},
{'name': 'Save/Restore functionality', 'type': 'group', 'children': [
{'name': 'Save State', 'type': 'action'},
{'name': 'Restore State', 'type': 'action', 'children': [
{'name': 'Add missing items', 'type': 'bool', 'value': True},
{'name': 'Remove extra items', 'type': 'bool', 'value': True},
]},
]},
{'name': 'Extra Parameter Options', 'type': 'group', 'children': [
{'name': 'Read-only', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'readonly': True},
{'name': 'Renamable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'renamable': True},
{'name': 'Removable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'removable': True},
]},
ComplexParameter(name='Custom parameter group (reciprocal values)'),
ScalableGroup(name="Expandable Parameter Group", children=[
{'name': 'ScalableParam 1', 'type': 'str', 'value': "default param 1"},
{'name': 'ScalableParam 2', 'type': 'str', 'value': "default param 2"},
]),
]
## Create tree of Parameter objects
p = Parameter.create(name='params', type='group', children=params)
## If anything changes in the tree, print a message
def change(param, changes):
print("tree changes:")
for param, change, data in changes:
path = p.childPath(param)
if path is not None:
childName = '.'.join(path)
else:
childName = param.name()
print(' parameter: %s'% childName)
print(' change: %s'% change)
print(' data: %s'% str(data))
print(' ----------')
p.sigTreeStateChanged.connect(change)
def valueChanging(param, value):
print("Value changing (not finalized):", param, value)
# Too lazy for recursion:
for child in p.children():
child.sigValueChanging.connect(valueChanging)
for ch2 in child.children():
ch2.sigValueChanging.connect(valueChanging)
def save():
global state
state = p.saveState()
def restore():
global state
add = p['Save/Restore functionality', 'Restore State', 'Add missing items']
rem = p['Save/Restore functionality', 'Restore State', 'Remove extra items']
p.restoreState(state, addChildren=add, removeChildren=rem)
p.param('Save/Restore functionality', 'Save State').sigActivated.connect(save)
p.param('Save/Restore functionality', 'Restore State').sigActivated.connect(restore)
## Create two ParameterTree widgets, both accessing the same data
t = ParameterTree()
t.setParameters(p, showTop=False)
t.setWindowTitle('pyqtgraph example: Parameter Tree')
t2 = ParameterTree()
t2.setParameters(p, showTop=False)
win = QtGui.QWidget()
layout = QtGui.QGridLayout()
win.setLayout(layout)
layout.addWidget(QtGui.QLabel("These are two views of the same data. They should always display the same values."), 0, 0, 1, 2)
layout.addWidget(t, 1, 0, 1, 1)
layout.addWidget(t2, 1, 1, 1, 1)
win.show()
win.resize(800,800)
## test save/restore
s = p.saveState()
p.restoreState(s)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| {
"content_hash": "bb4e4a7f960d4551e964c618858458e9",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 128,
"avg_line_length": 40.18390804597701,
"alnum_prop": 0.6045480549199085,
"repo_name": "vallsv/pyqtgraph",
"id": "6e8e0dbdd8b846eded9a650916f27db61ec90a6a",
"size": "7016",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "examples/parametertree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "135399"
},
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2095110"
}
],
"symlink_target": ""
} |
import os
import re
import random
import hashlib
# import binascii
import datetime
import uuid
from rdoclient import RandomOrgClient
import config
# get time in format I like
def get_timestamp():
"""
Function to generate timestamp for use in application
    :return: timestamp string
"""
dt = datetime.datetime.now()
return dt.strftime("%Y-%m-%d %X")
def gen_uid(length=10):
"""
Function to generate random uuid of varying length for application
:param length: length of uid
    :return: uid as a formatted string
"""
# TODO - find one that works in both v2.x/3.x...
# python 3.x version
uid = uuid.uuid4()
tmp_uid = re.sub('-', '', str(uid))
return ''.join(random.sample(list(tmp_uid), length))
def hash_password(password, salt_length=16, iterations=1000000, encoding='utf-8'):
"""
Function to securely hash password with variable salt and iterations
:param password: input secret
:param salt_length: length of salt
:param iterations: number of times to cycle this algorithm
:param encoding: character encoding
:return: hashed password
"""
salt = os.urandom(salt_length)
hashed_password = hashlib.pbkdf2_hmac(
hash_name='sha256',
password=bytes(password, encoding),
salt=salt,
iterations=iterations,
)
# Non-bytes version
# return binascii.hexlify(hashed_password)
# Bytes version
return hashed_password
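# Editor's note: the sketch below is not part of the original module. It shows how a
# caller could verify a password produced by hash_password() above, assuming the salt
# was persisted alongside the digest (hash_password() generates the salt internally
# and does not return it, so storing the salt is the caller's responsibility here).
# The function name verify_password_sketch is hypothetical.
def verify_password_sketch(password, salt, stored_hash, iterations=1000000, encoding='utf-8'):
    """
    Re-derive the PBKDF2-HMAC-SHA256 digest and compare it in constant time
    :param password: candidate secret
    :param salt: the exact salt bytes used when the stored hash was created
    :param stored_hash: previously stored digest bytes
    :param iterations: must match the value used at hashing time
    :param encoding: character encoding
    :return: True if the candidate password matches
    """
    import hmac
    candidate = hashlib.pbkdf2_hmac(
        hash_name='sha256',
        password=bytes(password, encoding),
        salt=salt,
        iterations=iterations,
    )
    # Constant-time comparison avoids leaking timing information
    return hmac.compare_digest(candidate, stored_hash)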
def get_roc(api_key=config.API_KEY):
"""
Get instance of RandomOrgClient for testing.
:param api_key: API key to fetch API client
:return: instance of ROC
"""
try:
roc = RandomOrgClient(api_key)
return roc
except (ValueError, AttributeError) as e:
print(e)
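# Editor's note: illustrative usage sketch, not part of the original module. It assumes
# config.API_KEY holds a valid RANDOM.ORG API key and that rdoclient's RandomOrgClient
# exposes generate_integers(n, min, max); treat the call below as an assumption rather
# than a documented contract of this project.
if __name__ == '__main__':
    roc = get_roc()
    if roc is not None:
        # Request six true-random integers between 1 and 49 (inclusive)
        print(roc.generate_integers(6, 1, 49))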
| {
"content_hash": "72928fee86fcc8bca24b94b4bc782df0",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 82,
"avg_line_length": 21.621951219512194,
"alnum_prop": 0.6576424139875916,
"repo_name": "nicorellius/password-generator",
"id": "d9daf586c6304400696ddae638a54dc3cca1ab8b",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35675"
}
],
"symlink_target": ""
} |
from django.contrib.postgres.fields import JSONField
from django.utils.translation import ugettext as _
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class BaseModel(Page):
# core fields
section_title = RichTextField(blank=True)
section_subtitle = RichTextField(blank=True)
# seo field
linked_data = JSONField(null=True, blank=True, help_text=_("Linked Data in JSON"))
content_panels = Page.content_panels + [
FieldPanel("section_title"),
FieldPanel("section_subtitle"),
]
promote_panels = Page.promote_panels + [FieldPanel("linked_data")]
class Meta:
abstract = True
| {
"content_hash": "4d343c4668c8f8f3e8aa22bbd491a6e1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 86,
"avg_line_length": 28.92,
"alnum_prop": 0.7164591977869986,
"repo_name": "evonove/evonove.it",
"id": "9387591ab2f22b15455a7544317b38124fb7b379",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "django-website/core/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1655"
},
{
"name": "HTML",
"bytes": "49434"
},
{
"name": "JavaScript",
"bytes": "43596"
},
{
"name": "Makefile",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "92905"
},
{
"name": "SCSS",
"bytes": "48162"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import os
import typing as T
import xml.etree.ElementTree as ET
from .vs2010backend import Vs2010Backend
from ..mesonlib import MesonException
if T.TYPE_CHECKING:
from ..build import Build
from ..interpreter import Interpreter
class Vs2017Backend(Vs2010Backend):
def __init__(self, build: T.Optional[Build], interpreter: T.Optional[Interpreter]):
super().__init__(build, interpreter)
self.name = 'vs2017'
self.vs_version = '2017'
self.sln_file_version = '12.00'
self.sln_version_comment = '15'
# We assume that host == build
if self.environment is not None:
comps = self.environment.coredata.compilers.host
if comps:
if comps and all(c.id == 'clang-cl' for c in comps.values()):
self.platform_toolset = 'llvm'
elif comps and all(c.id == 'intel-cl' for c in comps.values()):
c = list(comps.values())[0]
if c.version.startswith('19'):
self.platform_toolset = 'Intel C++ Compiler 19.0'
else:
# We don't have support for versions older than 2019 right now.
raise MesonException('There is currently no support for ICL before 19, patches welcome.')
if self.platform_toolset is None:
self.platform_toolset = 'v141'
# WindowsSDKVersion should be set by command prompt.
sdk_version = os.environ.get('WindowsSDKVersion', None)
if sdk_version:
self.windows_target_platform_version = sdk_version.rstrip('\\')
def generate_debug_information(self, link):
# valid values for vs2017 is 'false', 'true', 'DebugFastLink', 'DebugFull'
ET.SubElement(link, 'GenerateDebugInformation').text = 'DebugFull'
def generate_lang_standard_info(self, file_args, clconf):
if 'cpp' in file_args:
optargs = [x for x in file_args['cpp'] if x.startswith('/std:c++')]
if optargs:
ET.SubElement(clconf, 'LanguageStandard').text = optargs[0].replace("/std:c++", "stdcpp")
if 'c' in file_args:
optargs = [x for x in file_args['c'] if x.startswith('/std:c')]
if optargs:
ET.SubElement(clconf, 'LanguageStandard_C').text = optargs[0].replace("/std:c", "stdc")
| {
"content_hash": "4cf8fe93479276f701113a933823704c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 113,
"avg_line_length": 44.68518518518518,
"alnum_prop": 0.597596353087443,
"repo_name": "pexip/meson",
"id": "4ed5e483c4b82b26572d9a7e21179fc2e6fcfd38",
"size": "3005",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mesonbuild/backend/vs2017backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "C",
"bytes": "203464"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "59032"
},
{
"name": "CMake",
"bytes": "38429"
},
{
"name": "Cuda",
"bytes": "10592"
},
{
"name": "Cython",
"bytes": "1921"
},
{
"name": "D",
"bytes": "7840"
},
{
"name": "Fortran",
"bytes": "12248"
},
{
"name": "Genie",
"bytes": "476"
},
{
"name": "HTML",
"bytes": "897"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "3768"
},
{
"name": "JavaScript",
"bytes": "150"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "219"
},
{
"name": "Limbo",
"bytes": "28"
},
{
"name": "Meson",
"bytes": "595904"
},
{
"name": "Objective-C",
"bytes": "686"
},
{
"name": "Objective-C++",
"bytes": "378"
},
{
"name": "PowerShell",
"bytes": "4748"
},
{
"name": "Python",
"bytes": "4096804"
},
{
"name": "Roff",
"bytes": "625"
},
{
"name": "Rust",
"bytes": "4039"
},
{
"name": "Shell",
"bytes": "12539"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10033"
},
{
"name": "Verilog",
"bytes": "696"
},
{
"name": "Vim Script",
"bytes": "10684"
},
{
"name": "Yacc",
"bytes": "103"
}
],
"symlink_target": ""
} |
import os
from common import BASE_DIR
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request'
],
},
},
]
| {
"content_hash": "8c14a4e328d9031c830fd0551128eee7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 29.045454545454547,
"alnum_prop": 0.543035993740219,
"repo_name": "pomahtuk/py-cooking",
"id": "2a817af3c871ad6ad4cc29bea705d445bea9e179",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycooking/proj_settings/templates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "203"
},
{
"name": "Python",
"bytes": "6676"
}
],
"symlink_target": ""
} |
"""Analytic approximation for European option prices using SABR model."""
import enum
import tensorflow.compat.v2 as tf
@enum.unique
class SabrApproximationType(enum.Enum):
"""Approximation to the SABR model.
* `HAGAN`: Using the Hagan approximation [1].
#### References
[1] Hagan et al, Managing Smile Risk, Wilmott (2002), 1:84-108
"""
HAGAN = 1
@enum.unique
class SabrImpliedVolatilityType(enum.Enum):
"""The implied volality arising from the SABR approximate solution.
* `NORMAL`: The volatility for the normal model, i.e. the `sigma_n` for a
stochastic model of the underlying `F` behaving like:
```
dF = sigma_n dW
```
  * `LOGNORMAL`: The volatility for the lognormal (aka Black) model, i.e. the
`sigma_B` for a stochastic model of the underlying `F` behaving like:
```
dF = sigma_b F dW
```
"""
NORMAL = 1
LOGNORMAL = 2
def implied_volatility(*,
strikes,
expiries,
forwards,
alpha,
beta,
volvol,
rho,
shift=0.0,
volatility_type=SabrImpliedVolatilityType.LOGNORMAL,
approximation_type=SabrApproximationType.HAGAN,
dtype=None,
name=None):
"""Computes the implied volatility under the SABR model.
The SABR model specifies the risk neutral dynamics of the underlying as the
following set of stochastic differential equations:
```
dF = sigma F^beta dW_1
dsigma = volvol sigma dW_2
dW1 dW2 = rho dt
F(0) = f
sigma(0) = alpha
```
where F(t) represents the value of the forward price as a function of time,
and sigma(t) is the volatility.
Here, we implement an approximate solution as proposed by Hagan [1], and back
out the equivalent implied volatility that would've been obtained under either
the normal model or the Black model.
#### Example
```python
import tf_quant_finance as tff
import tensorflow.compat.v2 as tf
equiv_vol = tff.models.sabr.approximations.implied_volatility(
strikes=np.array([106.0, 11.0]),
expiries=np.array([17.0 / 365.0, 400.0 / 365.0]),
forwards=np.array([120.0, 20.0]),
alpha=1.63,
beta=0.6,
rho=0.00002,
volvol=3.3,
dtype=tf.float64)
# Expected: [0.33284656705268817, 1.9828728139982792]
# Running this inside a unit test passes:
# equiv_vol = self.evaluate(equiv_vol)
  # self.assertAllClose(equiv_vol, [0.33284656705268817, 1.9828728139982792])
```
#### References
[1] Hagan et al, Managing Smile Risk, Wilmott (2002), 1:84-108
Args:
strikes: Real `Tensor` of arbitrary shape, specifying the strike prices.
Values must be strictly positive.
expiries: Real `Tensor` of shape compatible with that of `strikes`,
specifying the corresponding time-to-expiries of the options. Values must
be strictly positive.
forwards: Real `Tensor` of shape compatible with that of `strikes`,
specifying the observed forward prices of the underlying. Values must be
strictly positive.
alpha: Real `Tensor` of shape compatible with that of `strikes`, specifying
the initial values of the stochastic volatility. Values must be strictly
positive.
beta: Real `Tensor` of shape compatible with that of `strikes`, specifying
the model exponent `beta`. Values must satisfy 0 <= `beta` <= 1.
volvol: Real `Tensor` of shape compatible with that of `strikes`,
specifying the model vol-vol multipliers. Values of `volvol` must be
non-negative.
rho: Real `Tensor` of shape compatible with that of `strikes`, specifying
the correlation factors between the Wiener processes modeling the forward
and the volatility. Values must satisfy -1 < `rho` < 1.
    shift: Optional `Tensor` of shape compatible with that of `strikes`,
specifying the shift parameter(s). In the shifted model, the process
modeling the forward is modified as: dF = sigma * (F + shift) ^ beta * dW.
With this modification, negative forward rates are valid as long as
F > -shift.
Default value: 0.0
volatility_type: Either SabrImpliedVolatility.NORMAL or LOGNORMAL.
Default value: `LOGNORMAL`.
    approximation_type: Instance of `SabrApproximationType`.
Default value: `HAGAN`.
dtype: Optional: `tf.DType`. If supplied, the dtype to be used for
converting values to `Tensor`s.
      Default value: `None`, which means that the dtype inferred from
      `strikes` is used.
name: str. The name for the ops created by this function.
Default value: 'sabr_approx_implied_volatility'.
Returns:
A real `Tensor` of the same shape as `strikes`, containing the
corresponding equivalent implied volatilities.
"""
name = name or 'sabr_approx_implied_volatility'
del approximation_type # Currently, only HAGAN approximation is supported.
with tf.name_scope(name):
strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
dtype = dtype or strikes.dtype
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')
alpha = tf.convert_to_tensor(alpha, dtype=dtype, name='alpha')
beta = tf.convert_to_tensor(beta, dtype=dtype, name='beta')
rho = tf.convert_to_tensor(rho, dtype=dtype, name='rho')
volvol = tf.convert_to_tensor(volvol, dtype=dtype, name='volvol')
# Apply the shift.
strikes += shift
forwards += shift
moneyness = forwards / strikes
log_moneyness = tf.math.log(moneyness)
adj_moneyness = tf.math.pow(moneyness, 1.0 - beta)
sqrt_adj_moneyness = tf.math.sqrt(adj_moneyness)
# adjusted alpha = alpha * K^(beta - 1)
adj_alpha = alpha * tf.math.pow(strikes, beta - 1.0)
# Zeta, as defined in (eq. A.69b in [1])
zeta = (volvol / adj_alpha) * sqrt_adj_moneyness * log_moneyness
# Zeta / xhat(zeta), as defined in (eq. A.69b in [1])
zeta_by_xhat = _zeta_by_xhat(zeta, rho, dtype)
# This is the denominator term occurring in the ((1 + ...) / (1 + ...)) of
# (eq. A.69a) in [1].
denom = _denom(beta, log_moneyness)
# The correction terms occurring in (1 + {...}) of (eq. A.69a) of [1], where
# we have multiplied in the "t_ex" to make the quantities dimensionless.
correction_2 = ((rho * beta / 4.0) * (1.0 / sqrt_adj_moneyness) *
(adj_alpha * volvol * expiries))
correction_3 = ((2.0 - 3.0 * rho * rho) / 24.0
* (volvol * volvol * expiries))
if volatility_type == SabrImpliedVolatilityType.NORMAL:
correction_1 = ((-beta * (2.0 - beta) / 24.0) * (1.0 / adj_moneyness) *
(adj_alpha * adj_alpha * expiries))
      # This is the numerator term occurring in the ((1 + ...) / (1 + ...)) of
      # (eq. A.69a) in [1]; it is the same series as `denom` above, with beta = 0.0
      numerator = _denom(0.0, log_moneyness)
      return (adj_alpha * strikes * tf.math.pow(moneyness, beta / 2.0) *
              (numerator / denom) * zeta_by_xhat *
(1 + correction_1 + correction_2 + correction_3))
elif volatility_type == SabrImpliedVolatilityType.LOGNORMAL:
correction_1 = (((1.0 - beta) *
(1.0 - beta) / 24.0) * (1.0 / adj_moneyness) *
(adj_alpha * adj_alpha * expiries))
return (adj_alpha * (1.0 / sqrt_adj_moneyness) * (1.0 / denom) *
zeta_by_xhat * (1.0 + correction_1 + correction_2 + correction_3))
else:
raise ValueError('Invalid value of `volatility_type`')
def _epsilon(dtype):
dtype = tf.as_dtype(dtype).as_numpy_dtype
eps = 1e-6 if dtype == tf.float32.as_numpy_dtype else 1e-10
return eps
def _zeta_by_xhat(zeta, rho, dtype):
zbxh = tf.math.divide_no_nan(
zeta,
tf.math.log(
(tf.math.sqrt(1 - 2 * rho * zeta + zeta * zeta) - rho + zeta) /
(1.0 - rho)))
eps = _epsilon(dtype)
# When zeta -> 0, the limit of zeta / x_hat(zeta) reduces to 1.0
return tf.where(tf.abs(zeta) > eps, zbxh, 1.0)
def _denom(beta, log_f_by_k):
s = (1.0 - beta) * log_f_by_k
s_squared = s * s
return 1.0 + s_squared / 24.0 + (s_squared * s_squared) / 1920.0
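# Editor's note: illustrative sketch, not part of the original module. It shows how the
# `volatility_type` argument selects between the normal and lognormal (Black) implied
# volatilities described in SabrImpliedVolatilityType. The helper name and the inputs
# are hypothetical; they mirror the values used in the docstring example above.
def _example_normal_vs_lognormal_vol():
  """Returns the lognormal and normal implied vols for the same SABR inputs."""
  common = dict(
      strikes=[106.0, 11.0],
      expiries=[17.0 / 365.0, 400.0 / 365.0],
      forwards=[120.0, 20.0],
      alpha=1.63,
      beta=0.6,
      volvol=3.3,
      rho=0.00002,
      dtype=tf.float64)
  lognormal_vol = implied_volatility(
      volatility_type=SabrImpliedVolatilityType.LOGNORMAL, **common)
  normal_vol = implied_volatility(
      volatility_type=SabrImpliedVolatilityType.NORMAL, **common)
  return lognormal_vol, normal_vol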
| {
"content_hash": "5e2a83dfb0a08e4cef0e547ae4e26f5c",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 80,
"avg_line_length": 36.87665198237885,
"alnum_prop": 0.6312268546171306,
"repo_name": "google/tf-quant-finance",
"id": "d7eeca917d49fed322d4126c6945c522e5d1f6da",
"size": "8946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_quant_finance/models/sabr/approximations/implied_volatility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5759"
},
{
"name": "Jupyter Notebook",
"bytes": "1634001"
},
{
"name": "Python",
"bytes": "3661863"
},
{
"name": "Shell",
"bytes": "2338"
},
{
"name": "Starlark",
"bytes": "109192"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range, object
import json
import logging
import os
import subprocess
import sys
import time
import uuid
import proxy.conf
import tempfile
from configobj import ConfigObj
from django.db.models import query, CharField, SmallIntegerField
from django.core.management import call_command
from django.core.paginator import Paginator
from django.db import connection
from django.urls import reverse
from django.test.client import Client
from django.views.static import serve
from django.http import HttpResponse
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises, nottest, raises
from dashboard.conf import HAS_SQL_ENABLED
from desktop.settings import DATABASES
from useradmin.models import GroupPermission, User
import desktop
import desktop.conf
import desktop.urls
import desktop.redaction as redaction
import desktop.views as views
from desktop.auth.backend import rewrite_user
from desktop.appmanager import DESKTOP_APPS
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.conf import validate_path
from desktop.lib.django_util import TruncatingModel
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.conf import _configs_from_dir
from desktop.lib.paths import get_desktop_root
from desktop.lib.python_util import force_dict_to_strings
from desktop.lib.test_utils import grant_access
from desktop.middleware import DJANGO_VIEW_AUTH_WHITELIST
from desktop.models import Directory, Document, Document2, get_data_link, _version_from_properties, ClusterConfig, HUE_VERSION
from desktop.redaction import logfilter
from desktop.redaction.engine import RedactionPolicy, RedactionRule
from desktop.views import check_config, home, generate_configspec, load_confs, collect_validation_messages, _get_config_errors
if sys.version_info[0] > 2:
from io import StringIO as string_io
from unittest.mock import patch, Mock
from django.urls import re_path
else:
from cStringIO import StringIO as string_io
from mock import patch, Mock
from django.conf.urls import url as re_path
LOG = logging.getLogger(__name__)
def test_home():
c = make_logged_in_client(username="test_home", groupname="test_home", recreate=True, is_superuser=False)
user = User.objects.get(username="test_home")
response = c.get(reverse(home))
assert_equal(sorted(["notmine", "trash", "mine", "history"]), sorted(list(json.loads(response.context[0]['json_tags']).keys())))
assert_equal(200, response.status_code)
from pig.models import PigScript
script, created = PigScript.objects.get_or_create(owner=user)
doc = Document.objects.link(script, owner=script.owner, name='test_home')
response = c.get(reverse(home))
assert_true(str(doc.id) in json.loads(response.context[0]['json_documents']))
response = c.get(reverse(home))
tags = json.loads(response.context[0]['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.send_to_trash()
response = c.get(reverse(home))
tags = json.loads(response.context[0]['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([doc.id], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.restore_from_trash()
response = c.get(reverse(home))
tags = json.loads(response.context[0]['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.add_to_history()
response = c.get(reverse(home))
tags = json.loads(response.context[0]['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags) # We currently don't fetch [doc.id]
def test_skip_wizard():
c = make_logged_in_client() # is_superuser
response = c.get('/', follow=True)
assert_true(
    any('admin_wizard.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
assert_true(
    any('home.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
assert_true(
    any('admin_wizard.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
c = make_logged_in_client(username="test_skip_wizard", password="test_skip_wizard", is_superuser=False)
response = c.get('/', follow=True)
assert_true(
    any('home.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
assert_true(
    any('home.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
assert_true(
    any('home.mako' in _template.filename for _template in response.templates),
[_template.filename for _template in response.templates]
)
def test_public_views():
c = Client()
for view in DJANGO_VIEW_AUTH_WHITELIST:
if view is serve:
url = reverse(view, kwargs={'path': 'desktop/art/favicon.ico'})
else:
url = reverse(view)
response = c.get(url)
assert_equal(200, response.status_code)
def test_prometheus_view():
if not desktop.conf.ENABLE_PROMETHEUS.get():
raise SkipTest
ALL_PROMETHEUS_METRICS = [
'django_http_requests_before_middlewares_total',
'django_http_responses_before_middlewares_total',
'django_http_requests_latency_including_middlewares_seconds',
'django_http_requests_unknown_latency_including_middlewares_total',
'django_http_requests_latency_seconds_by_view_method',
'django_http_requests_unknown_latency_total',
'django_http_ajax_requests_total',
'django_http_requests_total_by_method',
'django_http_requests_total_by_transport',
'django_http_requests_total_by_view_transport_method',
'django_http_requests_body_total_bytes',
'django_http_responses_total_by_templatename',
'django_http_responses_total_by_status',
'django_http_responses_body_total_bytes',
'django_http_responses_total_by_charset',
'django_http_responses_streaming_total',
'django_http_exceptions_total_by_type',
'django_http_exceptions_total_by_view',
]
c = Client()
response = c.get('/metrics')
for metric in ALL_PROMETHEUS_METRICS:
metric = metric if isinstance(metric, bytes) else metric.encode('utf-8')
if metric not in desktop.metrics.ALLOWED_DJANGO_PROMETHEUS_METRICS:
assert_false(metric in response.content, 'metric: %s \n %s' % (metric, response.content))
else:
assert_true(metric in response.content, 'metric: %s \n %s' % (metric, response.content))
def test_log_view():
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warning('une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warning('Got response: PK\x03\x04\n\x00\x00\x08\x00\x00\xad\x0cN?\x00\x00\x00\x00')
# DjangoUnicodeDecodeError: 'utf8' codec can't decode byte 0xad in position 75: invalid start byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
def test_download_log_view():
c = make_logged_in_client()
URL = reverse(views.download_log_view)
LOG = logging.getLogger(__name__)
LOG.warning(u'une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal("application/zip", response.get('Content-Type', ''))
def hue_version():
global HUE_VERSION
HUE_VERSION_BAK = HUE_VERSION
try:
assert_equal('cdh6.x-SNAPSHOT', _version_from_properties(string_io(
"""# Autogenerated build properties
version=3.9.0-cdh5.9.0-SNAPSHOT
git.hash=f5fbe90b6a1d0c186b0ddc6e65ce5fc8d24725c8
cloudera.cdh.release=cdh6.x-SNAPSHOT
cloudera.hash=f5fbe90b6a1d0c186b0ddc6e65ce5fc8d24725c8aaaaa"""))
)
assert_false(_version_from_properties(string_io(
"""# Autogenerated build properties
version=3.9.0-cdh5.9.0-SNAPSHOT
git.hash=f5fbe90b6a1d0c186b0ddc6e65ce5fc8d24725c8
cloudera.hash=f5fbe90b6a1d0c186b0ddc6e65ce5fc8d24725c8aaaaa"""))
)
assert_false(_version_from_properties(string_io('')))
finally:
HUE_VERSION = HUE_VERSION_BAK
def test_prefs():
c = make_logged_in_client()
# Get everything
response = c.get('/desktop/api2/user_preferences/')
assert_equal({}, json.loads(response.content)['data'])
# Set and get
response = c.post('/desktop/api2/user_preferences/foo', {'set': 'bar'})
assert_equal('bar', json.loads(response.content)['data']['foo'])
response = c.get('/desktop/api2/user_preferences/')
assert_equal('bar', json.loads(response.content)['data']['foo'])
# Reset (use post this time)
c.post('/desktop/api2/user_preferences/foo', {'set': 'baz'})
response = c.get('/desktop/api2/user_preferences/foo')
assert_equal('baz', json.loads(response.content)['data']['foo'])
# Check multiple values
c.post('/desktop/api2/user_preferences/elephant', {'set': 'room'})
response = c.get('/desktop/api2/user_preferences/')
assert_true("baz" in list(json.loads(response.content)['data'].values()), response.content)
assert_true("room" in list(json.loads(response.content)['data'].values()), response.content)
# Delete everything
c.post('/desktop/api2/user_preferences/elephant', {'delete': ''})
c.post('/desktop/api2/user_preferences/foo', {'delete': ''})
response = c.get('/desktop/api2/user_preferences/')
assert_equal({}, json.loads(response.content)['data'])
# Check non-existent value
response = c.get('/desktop/api2/user_preferences/doesNotExist')
assert_equal(None, json.loads(response.content)['data'])
def test_status_bar():
"""
Subs out the status_bar_views registry with temporary examples.
Tests handling of errors on view functions.
"""
backup = views._status_bar_views
views._status_bar_views = []
c = make_logged_in_client()
views.register_status_bar_view(lambda _: HttpResponse("foo", status=200))
views.register_status_bar_view(lambda _: HttpResponse("bar"))
views.register_status_bar_view(lambda _: None)
def f(r):
raise Exception()
views.register_status_bar_view(f)
response = c.get("/desktop/status_bar")
assert_equal(b"foobar", response.content)
views._status_bar_views = backup
def test_paginator():
"""
Test that the paginator works with partial list.
"""
def assert_page(page, data, start, end):
assert_equal(page.object_list, data)
assert_equal(page.start_index(), start)
assert_equal(page.end_index(), end)
# First page 1-20
obj = list(range(20))
pgn = Paginator(obj, per_page=20)
assert_page(pgn.page(1), obj, 1, 20)
  # Handle extra data on the first page (22 items with 20 per page)
obj = list(range(22))
pgn = Paginator(obj, per_page=20)
assert_page(pgn.page(1), list(range(20)), 1, 20)
# Handle total < len(obj). Only works for QuerySet.
obj = query.QuerySet()
obj._result_cache = list(range(10))
pgn = Paginator(obj, per_page=10)
assert_page(pgn.page(1), list(range(10)), 1, 10)
# Still works with a normal complete list
obj = list(range(25))
pgn = Paginator(obj, per_page=20)
assert_page(pgn.page(1), list(range(20)), 1, 20)
assert_page(pgn.page(2), list(range(20, 25)), 21, 25)
def test_thread_dump():
c = make_logged_in_client()
response = c.get("/desktop/debug/threads", HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_true(b"test_thread_dump" in response.content)
def test_truncating_model():
class TinyModel(TruncatingModel):
short_field = CharField(max_length=10)
non_string_field = SmallIntegerField()
a = TinyModel()
  a.short_field = 'a' * 9 # One less than its max length
assert_true(a.short_field == 'a' * 9, 'Short-enough field does not get truncated')
  a.short_field = 'a' * 11 # One more than its max_length
assert_true(a.short_field == 'a' * 10, 'Too-long field gets truncated')
a.non_string_field = 10**10
assert_true(a.non_string_field == 10**10, 'non-string fields are not truncated')
def test_error_handling():
raise SkipTest
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
exc_msg = "error_raising_view: Test earráid handling"
def error_raising_view(request, *args, **kwargs):
raise Exception(exc_msg)
def popup_exception_view(request, *args, **kwargs):
raise PopupException(exc_msg, title="earráid", detail=exc_msg)
# Add an error view
error_url_pat = [
re_path('^500_internal_error$', error_raising_view),
re_path('^popup_exception$', popup_exception_view)
]
desktop.urls.urlpatterns.extend(error_url_pat)
try:
def store_exc_info(*args, **kwargs):
pass
# Disable the test client's exception forwarding
c = make_logged_in_client()
c.store_exc_info = store_exc_info
response = c.get('/500_internal_error')
assert_true(any(["500.mako" in _template.filename for _template in response.templates]))
assert_true('Thank you for your patience' in response.content)
assert_true(exc_msg not in response.content)
# Now test the 500 handler with backtrace
desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(True)
response = c.get('/500_internal_error')
assert_equal(response.template.name, 'Technical 500 template')
assert_true(exc_msg in response.content)
# PopupException
response = c.get('/popup_exception')
assert_true(any(["popup_error.mako" in _template.filename for _template in response.templates]))
assert_true(exc_msg in response.content)
finally:
# Restore the world
for i in error_url_pat:
desktop.urls.urlpatterns.remove(i)
restore_django_debug()
restore_500_debug()
def test_desktop_permissions():
USERNAME = 'test_core_permissions'
GROUPNAME = 'default'
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$')
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
# Access to the basic works
assert_equal(200, c.get('/hue/accounts/login/', follow=True).status_code)
assert_equal(200, c.get('/accounts/logout', follow=True).status_code)
assert_equal(200, c.get('/home', follow=True).status_code)
def test_app_permissions():
USERNAME = 'test_app_permissions'
GROUPNAME = 'impala_only'
resets = [
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$'),
HAS_SQL_ENABLED.set_for_testing(False)
]
try:
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
user = rewrite_user(User.objects.get(username=USERNAME))
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
def check_app(status_code, app_name):
if app_name in DESKTOP_APPS:
assert_equal(
status_code,
c.get('/' + app_name, follow=True).status_code,
'status_code=%s app_name=%s' % (status_code, app_name))
# Access to nothing
check_app(401, 'beeswax')
check_app(401, 'hive')
check_app(401, 'impala')
check_app(401, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(401, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('browser' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('dashboard' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('sdkapps' in apps, apps)
# Should always be enabled as it is a lib
grant_access(USERNAME, GROUPNAME, "beeswax")
# Add access to hive
grant_access(USERNAME, GROUPNAME, "hive")
check_app(200, 'beeswax')
check_app(200, 'hive')
check_app(401, 'impala')
check_app(401, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(401, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_true('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('browser' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('dashboard' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('sdkapps' in apps, apps)
# Add access to hbase
grant_access(USERNAME, GROUPNAME, "hbase")
check_app(200, 'beeswax')
check_app(200, 'hive')
check_app(401, 'impala')
check_app(200, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(401, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_true('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
if 'hbase' not in desktop.conf.APP_BLACKLIST.get():
assert_true('browser' in apps, apps)
assert_true('hbase' in apps['browser']['interpreter_names'], apps['browser'])
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('scheduler' in apps, apps)
assert_false('dashboard' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('sdkapps' in apps, apps)
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
check_app(401, 'beeswax')
check_app(401, 'impala')
check_app(401, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(401, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('browser' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('dashboard' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('sdkapps' in apps, apps)
# Test only impala perm
grant_access(USERNAME, GROUPNAME, "impala")
check_app(401, 'beeswax')
check_app(200, 'impala')
check_app(401, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(401, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('browser' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('dashboard' in apps, apps)
assert_false('scheduler' in apps, apps)
assert_false('sdkapps' in apps, apps)
# Oozie Editor and Browser
grant_access(USERNAME, GROUPNAME, "oozie")
check_app(401, 'hive')
check_app(200, 'impala')
check_app(401, 'hbase')
check_app(401, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(200, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_true('scheduler' in apps, apps)
assert_false('browser' in apps, apps) # Actually should be true, but logic not implemented
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
grant_access(USERNAME, GROUPNAME, "pig")
check_app(401, 'hive')
check_app(200, 'impala')
check_app(401, 'hbase')
check_app(200, 'pig')
check_app(401, 'search')
check_app(401, 'spark')
check_app(200, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
if 'search' not in desktop.conf.APP_BLACKLIST.get():
grant_access(USERNAME, GROUPNAME, "search")
check_app(401, 'hive')
check_app(200, 'impala')
check_app(401, 'hbase')
check_app(200, 'pig')
check_app(200, 'search')
check_app(401, 'spark')
check_app(200, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_false('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
if 'spark' not in desktop.conf.APP_BLACKLIST.get():
grant_access(USERNAME, GROUPNAME, "spark")
check_app(401, 'hive')
check_app(200, 'impala')
check_app(401, 'hbase')
check_app(200, 'pig')
check_app(200, 'search')
check_app(200, 'spark')
check_app(200, 'oozie')
apps = ClusterConfig(user=user).get_apps()
assert_false('hive' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('impala' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('pig' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('solr' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('spark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('pyspark' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('r' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('jar' in apps.get('editor', {}).get('interpreter_names', []), apps)
assert_true('py' in apps.get('editor', {}).get('interpreter_names', []), apps)
finally:
for f in resets:
f()
def test_error_handling_failure():
# Change rewrite_user to call has_hue_permission
# Try to get filebrowser page
# test for default 500 page
# Restore rewrite_user
import desktop.auth.backend
c = make_logged_in_client()
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
original_rewrite_user = desktop.auth.backend.rewrite_user
def rewrite_user(user):
user = original_rewrite_user(user)
delattr(user, 'has_hue_permission')
return user
original_rewrite_user = desktop.auth.backend.rewrite_user
desktop.auth.backend.rewrite_user = rewrite_user
try:
# Make sure we are showing default 500.html page.
# See django.test.client#L246
assert_raises(AttributeError, c.get, reverse('desktop.views.threads'))
finally:
# Restore the world
restore_django_debug()
restore_500_debug()
desktop.auth.backend.rewrite_user = original_rewrite_user
def test_404_handling():
view_name = '/the-view-that-is-not-there'
c = make_logged_in_client()
response = c.get(view_name)
assert_true(any(['404.mako' in _template.filename for _template in response.templates]), response.templates)
assert_true(b'not found' in response.content)
if not isinstance(view_name, bytes):
view_name = view_name.encode('utf-8')
assert_true(view_name in response.content)
class RecordingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.records = []
def emit(self, r):
self.records.append(r)
def test_log_event():
c = make_logged_in_client()
root = logging.getLogger("desktop.views.log_frontend_event")
handler = RecordingHandler()
root.addHandler(handler)
c.get("/desktop/log_frontend_event?level=info&message=foo")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo", handler.records[-1].message)
assert_equal("desktop.views.log_frontend_event", handler.records[-1].name)
c.get("/desktop/log_frontend_event?level=error&message=foo2")
assert_equal("ERROR", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo2", handler.records[-1].message)
c.get("/desktop/log_frontend_event?message=foo3")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo3", handler.records[-1].message)
c.post("/desktop/log_frontend_event", {
"message": "01234567" * 1024})
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: ",
handler.records[-1].message)
root.removeHandler(handler)
def test_validate_path():
with tempfile.NamedTemporaryFile() as local_file:
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing(local_file.name)
assert_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=False))
reset()
try:
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/tmm/does_not_exist')
assert_not_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True))
assert_true(False)
except Exception as ex:
assert_true('does not exist' in str(ex), ex)
finally:
reset()
@attr('integration')
@attr('requires_hadoop')
def test_config_check():
with tempfile.NamedTemporaryFile() as cert_file:
with tempfile.NamedTemporaryFile() as key_file:
reset = (
desktop.conf.SECRET_KEY.set_for_testing(''),
desktop.conf.SECRET_KEY_SCRIPT.set_for_testing(present=False),
desktop.conf.SSL_CERTIFICATE.set_for_testing(cert_file.name),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(key_file.name),
desktop.conf.DEFAULT_SITE_ENCODING.set_for_testing('klingon')
)
cli = make_logged_in_client()
try:
resp = cli.get('/desktop/debug/check_config')
assert_true('Secret key should be configured' in resp.content, resp)
assert_true('klingon' in resp.content, resp)
assert_true('Encoding not supported' in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
prev_env_conf = os.environ.get("HUE_CONF_DIR")
try:
# Set HUE_CONF_DIR and make sure check_config returns appropriate conf
os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
def validate_by_spec(error_list):
pass
    # Monkey patch, as this would otherwise fail because the conf dir doesn't exist
if not hasattr(desktop.views, 'real_validate_by_spec'):
desktop.views.real_validate_by_spec = desktop.views.validate_by_spec
desktop.views.validate_by_spec = validate_by_spec
resp = cli.get('/desktop/debug/check_config')
assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
finally:
if prev_env_conf is None:
os.environ.pop("HUE_CONF_DIR", None)
else:
os.environ["HUE_CONF_DIR"] = prev_env_conf
desktop.views.validate_by_spec = desktop.views.real_validate_by_spec
def test_last_access_time():
raise SkipTest
c = make_logged_in_client(username="access_test")
c.post('/hue/accounts/login/')
login = desktop.auth.views.get_current_users()
before_access_time = time.time()
response = c.get('/home')
after_access_time = time.time()
access = desktop.auth.views.get_current_users()
user = response.context[0]['user']
login_time = login[user]['time']
access_time = access[user]['time']
# Check that 'last_access_time' is later than login time
assert_true(login_time < access_time)
# Check that 'last_access_time' is in between the timestamps before and after the last access path
assert_true(before_access_time < access_time)
assert_true(access_time < after_access_time)
def test_ui_customizations():
if desktop.conf.is_lb_enabled(): # Assumed that live cluster connects to direct Hue
custom_message = 'You are accessing a non-optimized Hue, please switch to one of the available addresses'
else:
custom_message = 'test ui customization'
reset = (
desktop.conf.CUSTOM.BANNER_TOP_HTML.set_for_testing(custom_message),
desktop.conf.CUSTOM.LOGIN_SPLASH_HTML.set_for_testing(custom_message),
)
try:
c = make_logged_in_client()
c.logout()
if not isinstance(custom_message, bytes):
custom_message = custom_message.encode('utf-8')
resp = c.get('/hue/accounts/login/', follow=False)
assert_true(custom_message in resp.content, resp)
resp = c.get('/hue/about', follow=True)
assert_true(custom_message in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
@attr('integration')
@attr('requires_hadoop')
def test_check_config_ajax():
c = make_logged_in_client()
response = c.get(reverse(check_config))
assert_true("misconfiguration" in response.content, response.content)
def test_cx_Oracle():
"""
Tests that cx_Oracle (external dependency) is built correctly.
"""
if 'ORACLE_HOME' not in os.environ and 'ORACLE_INSTANTCLIENT_HOME' not in os.environ:
raise SkipTest
try:
import cx_Oracle
return
except ImportError as ex:
if "No module named" in ex.message:
assert_true(False, "cx_Oracle skipped its build. This happens if "
"env var ORACLE_HOME or ORACLE_INSTANTCLIENT_HOME is not defined. "
"So ignore this test failure if your build does not need to work "
"with an oracle backend.")
class TestStrictRedirection(object):
def setUp(self):
self.finish = desktop.conf.AUTH.BACKEND.set_for_testing(['desktop.auth.backend.AllowFirstUserDjangoBackend'])
self.client = make_logged_in_client()
self.user = dict(username="test", password="test")
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/example.com\/.*$')
def tearDown(self):
self.finish()
def test_redirection_blocked(self):
# Redirection with code 301 should be handled properly
# Redirection with Status code 301 example reference: http://www.somacon.com/p145.php
self._test_redirection(redirection_url='http://www.somacon.com/color/html_css_table_border_styles.php',
expected_status_code=403)
# Redirection with code 302 should be handled properly
self._test_redirection(redirection_url='http://www.google.com',
expected_status_code=403)
def test_redirection_allowed(self):
# Redirection to the host where Hue is running should be OK.
self._test_redirection(redirection_url='/', expected_status_code=302)
self._test_redirection(redirection_url='/pig', expected_status_code=302)
self._test_redirection(redirection_url='http://testserver/', expected_status_code=302)
self._test_redirection(redirection_url='https://testserver/', expected_status_code=302, **{
'SERVER_PORT': '443',
'wsgi.url_scheme': 'https',
})
self._test_redirection(redirection_url='http://example.com/', expected_status_code=302)
def _test_redirection(self, redirection_url, expected_status_code, **kwargs):
data = self.user.copy()
data['next'] = redirection_url
response = self.client.post('/hue/accounts/login/', data, **kwargs)
assert_equal(expected_status_code, response.status_code)
if expected_status_code == 403:
error_msg = 'Redirect to ' + redirection_url + ' is not allowed.'
if not isinstance(error_msg, bytes):
error_msg = error_msg.encode('utf-8')
assert_true(error_msg in response.content, response.content)
class BaseTestPasswordConfig(object):
SCRIPT = '%s -c "print(\'\\n password from script \\n\')"' % sys.executable
def get_config_password(self):
raise NotImplementedError
def get_config_password_script(self):
raise NotImplementedError
def get_password(self):
raise NotImplementedError
def test_read_password_from_script(self):
self._run_test_read_password_from_script_with(present=False)
self._run_test_read_password_from_script_with(data=None)
self._run_test_read_password_from_script_with(data='')
def _run_test_read_password_from_script_with(self, **kwargs):
resets = [
self.get_config_password().set_for_testing(**kwargs),
self.get_config_password_script().set_for_testing(self.SCRIPT),
]
try:
assert_equal(self.get_password(), ' password from script ', 'pwd: %s, kwargs: %s' % (self.get_password(), kwargs))
finally:
for reset in resets:
reset()
def test_config_password_overrides_script_password(self):
resets = [
self.get_config_password().set_for_testing(' password from config '),
self.get_config_password_script().set_for_testing(self.SCRIPT),
]
try:
assert_equal(self.get_password(), ' password from config ')
finally:
for reset in resets:
reset()
def test_password_script_raises_exception(self):
resets = [
self.get_config_password().set_for_testing(present=False),
self.get_config_password_script().set_for_testing(
'%s -c "import sys; sys.exit(1)"' % sys.executable
),
]
try:
assert_raises(subprocess.CalledProcessError, self.get_password)
finally:
for reset in resets:
reset()
resets = [
self.get_config_password().set_for_testing(present=False),
self.get_config_password_script().set_for_testing('/does-not-exist'),
]
try:
assert_raises(subprocess.CalledProcessError, self.get_password)
finally:
for reset in resets:
reset()
class TestSecretKeyConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.SECRET_KEY
def get_config_password_script(self):
return desktop.conf.SECRET_KEY_SCRIPT
def get_password(self):
return desktop.conf.get_secret_key()
class TestDatabasePasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.DATABASE.PASSWORD
def get_config_password_script(self):
return desktop.conf.DATABASE.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_database_password()
class TestLDAPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.AUTH_PASSWORD
def get_config_password_script(self):
return desktop.conf.AUTH_PASSWORD_SCRIPT
def get_password(self):
# We are using dynamic_default now, so we need to cheat for the tests as only using set_for_testing(present=False) will trigger it.
if desktop.conf.AUTH_PASSWORD.get():
return desktop.conf.AUTH_PASSWORD.get()
else:
return self.get_config_password_script().get()
class TestLDAPBindPasswordConfig(BaseTestPasswordConfig):
def setup(self):
self.finish = desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({'test': {}})
def teardown(self):
self.finish()
def get_config_password(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD
def get_config_password_script(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_ldap_bind_password(desktop.conf.LDAP.LDAP_SERVERS['test'])
class TestSMTPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.SMTP.PASSWORD
def get_config_password_script(self):
return desktop.conf.SMTP.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_smtp_password()
class TestDocument(object):
def setUp(self):
make_logged_in_client(username="original_owner", groupname="test_doc", recreate=True, is_superuser=False)
self.user = User.objects.get(username="original_owner")
make_logged_in_client(username="copy_owner", groupname="test_doc", recreate=True, is_superuser=False)
self.copy_user = User.objects.get(username="copy_owner")
self.document2 = Document2.objects.create(
name='Test Document2',
type='search-dashboard',
owner=self.user,
description='Test Document2'
)
self.document = Document.objects.link(
content_object=self.document2,
owner=self.user,
name='Test Document',
description='Test Document',
extra='test'
)
self.document.save()
self.document2.doc.add(self.document)
def tearDown(self):
# Get any Doc2 objects that were created and delete them, Doc1 child objects will be deleted in turn
test_docs = Document2.objects.filter(name__contains='Test Document2')
test_docs.delete()
def test_document_create(self):
assert_true(Document2.objects.filter(name='Test Document2').exists())
assert_true(Document.objects.filter(name='Test Document').exists())
assert_equal(Document2.objects.get(name='Test Document2').id, self.document2.id)
assert_equal(Document.objects.get(name='Test Document').id, self.document.id)
def test_document_trashed_and_restore(self):
home_dir = Directory.objects.get_home_directory(self.user)
test_dir, created = Directory.objects.get_or_create(
parent_directory=home_dir,
owner=self.user,
name='test_dir'
)
test_doc = Document2.objects.create(
name='Test Document2',
type='search-dashboard',
owner=self.user,
description='Test Document2',
parent_directory=test_dir
)
child_dir, created = Directory.objects.get_or_create(
parent_directory=test_dir,
owner=self.user,
name='child_dir'
)
test_doc1 = Document2.objects.create(
name='Test Document2',
type='search-dashboard',
owner=self.user,
description='Test Document2',
parent_directory=child_dir
)
assert_false(test_dir.is_trashed)
assert_false(test_doc.is_trashed)
assert_false(child_dir.is_trashed)
assert_false(test_doc1.is_trashed)
try:
test_dir.trash()
test_dir = Document2.objects.get(id=test_dir.id)
test_doc = Document2.objects.get(id=test_doc.id)
child_dir = Document2.objects.get(id=child_dir.id)
test_doc1 = Document2.objects.get(id=test_doc1.id)
assert_true(test_doc.is_trashed)
assert_true(test_dir.is_trashed)
assert_true(child_dir.is_trashed)
assert_true(test_doc1.is_trashed)
# Test restore
test_dir.restore()
test_dir = Document2.objects.get(id=test_dir.id)
test_doc = Document2.objects.get(id=test_doc.id)
child_dir = Document2.objects.get(id=child_dir.id)
test_doc1 = Document2.objects.get(id=test_doc1.id)
assert_false(test_doc.is_trashed)
assert_false(test_dir.is_trashed)
assert_false(child_dir.is_trashed)
assert_false(test_doc1.is_trashed)
finally:
test_doc.delete()
test_dir.delete()
test_doc1.delete()
child_dir.delete()
def test_multiple_home_directories(self):
home_dir = Directory.objects.get_home_directory(self.user)
test_doc1 = Document2.objects.create(
name='test-doc1',
type='query-hive',
owner=self.user,
description='',
parent_directory=home_dir
)
assert_equal(home_dir.children.exclude(name__in=['.Trash', 'Gist']).count(), 2)
# Cannot create second home directory directly as it will fail in Document2.validate()
second_home_dir = Document2.objects.create(owner=self.user, parent_directory=None, name='second_home_dir', type='directory')
Document2.objects.filter(name='second_home_dir').update(name=Document2.HOME_DIR, parent_directory=None)
assert_equal(Document2.objects.filter(owner=self.user, name=Document2.HOME_DIR).count(), 2)
test_doc2 = Document2.objects.create(
name='test-doc2',
type='query-hive',
owner=self.user,
description='',
parent_directory=second_home_dir
)
assert_equal(second_home_dir.children.count(), 1)
merged_home_dir = Directory.objects.get_home_directory(self.user)
children = merged_home_dir.children.all()
assert_equal(children.exclude(name__in=['.Trash', 'Gist']).count(), 3)
children_names = [child.name for child in children]
assert_true(test_doc2.name in children_names)
assert_true(test_doc1.name in children_names)
def test_multiple_trash_directories(self):
home_dir = Directory.objects.get_home_directory(self.user)
test_doc1 = Document2.objects.create(
name='test-doc1',
type='query-hive',
owner=self.user,
description='',
parent_directory=home_dir
)
assert_equal(home_dir.children.count(), 3)
# Cannot create second trash directory directly as it will fail in Document2.validate()
Document2.objects.create(owner=self.user, parent_directory=home_dir, name='second_trash_dir', type='directory')
Document2.objects.filter(name='second_trash_dir').update(name=Document2.TRASH_DIR)
assert_equal(Directory.objects.filter(owner=self.user, name=Document2.TRASH_DIR).count(), 2)
test_doc2 = Document2.objects.create(
name='test-doc2',
type='query-hive',
owner=self.user,
description='',
parent_directory=home_dir
)
assert_equal(home_dir.children.count(), 5) # Including the second trash
assert_raises(Document2.MultipleObjectsReturned, Directory.objects.get, name=Document2.TRASH_DIR)
test_doc1.trash()
assert_equal(home_dir.children.count(), 3) # As trash documents are merged count is back to 3
merged_trash_dir = Directory.objects.get(name=Document2.TRASH_DIR, owner=self.user)
test_doc2.trash()
children = merged_trash_dir.children.all()
assert_equal(children.count(), 2)
children_names = [child.name for child in children]
assert_true(test_doc2.name in children_names)
assert_true(test_doc1.name in children_names)
def test_document_copy(self):
raise SkipTest
name = 'Test Document2 Copy'
self.doc2_count = Document2.objects.count()
self.doc1_count = Document.objects.count()
doc2 = self.document2.copy(name=name, owner=self.copy_user, description=self.document2.description)
doc = self.document.copy(doc2, name=name, owner=self.copy_user, description=self.document2.description)
# Test that copying creates another object
assert_equal(Document2.objects.count(), self.doc2_count + 1)
assert_equal(Document.objects.count(), self.doc1_count)
# Test that the content object is not pointing to the same object
assert_not_equal(self.document2.doc, doc2.doc)
# Test that the owner is attributed to the new user
assert_equal(doc2.owner, self.copy_user)
# Test that copying enables attribute overrides
assert_equal(Document2.objects.filter(name=name).count(), 1)
assert_equal(doc2.description, self.document2.description)
# Test that the content object is not pointing to the same object
assert_not_equal(self.document.content_object, doc.content_object)
# Test that the owner is attributed to the new user
assert_equal(doc.owner, self.copy_user)
# Test that copying enables attribute overrides
assert_equal(Document.objects.filter(name=name).count(), 1)
assert_equal(doc.description, self.document.description)
def test_redact_statements(self):
old_policies = redaction.global_redaction_engine.policies
redaction.global_redaction_engine.policies = [
RedactionPolicy([
        RedactionRule('', r'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
]
logfilter.add_log_redaction_filter_to_logger(redaction.global_redaction_engine, logging.root)
sensitive_query = 'SELECT "ssn=123-45-6789"'
redacted_query = 'SELECT "ssn=XXX-XX-XXXX"'
nonsensitive_query = 'SELECT "hello"'
snippets = [
{
'status': 'ready',
'viewSettings': {
'sqlDialect': True,
'snippetImage': '/static/beeswax/art/icon_beeswax_48.png',
'placeHolder': 'Example: SELECT * FROM tablename, or press CTRL + space',
'aceMode': 'ace/mode/hive'
},
'id': '10a29cda-063f-1439-4836-d0c460154075',
'statement_raw': sensitive_query,
'statement': sensitive_query,
'type': 'hive'
},
{
'status': 'ready',
'viewSettings': {
'sqlDialect': True,
'snippetImage': '/static/impala/art/icon_impala_48.png',
'placeHolder': 'Example: SELECT * FROM tablename, or press CTRL + space',
'aceMode': 'ace/mode/impala'
},
'id': 'e17d195a-beb5-76bf-7489-a9896eeda67a',
'statement_raw': sensitive_query,
'statement': sensitive_query,
'type': 'impala'
},
{
'status': 'ready',
'viewSettings': {
'sqlDialect': True,
'snippetImage': '/static/beeswax/art/icon_beeswax_48.png',
'placeHolder': 'Example: SELECT * FROM tablename, or press CTRL + space',
'aceMode': 'ace/mode/hive'
},
'id': '10a29cda-063f-1439-4836-d0c460154075',
'statement_raw': nonsensitive_query,
'statement': nonsensitive_query,
'type': 'hive'
},
]
try:
self.document2.type = 'notebook'
self.document2.update_data({'snippets': snippets})
self.document2.search = sensitive_query
self.document2.save()
saved_snippets = self.document2.data_dict['snippets']
# Make sure redacted queries are redacted.
assert_equal(redacted_query, saved_snippets[0]['statement'])
assert_equal(redacted_query, saved_snippets[0]['statement_raw'])
assert_equal(True, saved_snippets[0]['is_redacted'])
assert_equal(redacted_query, saved_snippets[1]['statement'])
assert_equal(redacted_query, saved_snippets[1]['statement_raw'])
assert_equal(True, saved_snippets[1]['is_redacted'])
document = Document2.objects.get(pk=self.document2.pk)
assert_equal(redacted_query, document.search)
# Make sure unredacted queries are not redacted.
assert_equal(nonsensitive_query, saved_snippets[2]['statement'])
assert_equal(nonsensitive_query, saved_snippets[2]['statement_raw'])
assert_false('is_redacted' in saved_snippets[2])
finally:
redaction.global_redaction_engine.policies = old_policies
def test_get_document(self):
c1 = make_logged_in_client(username='test_get_user', groupname='test_get_group', recreate=True, is_superuser=False)
r1 = c1.get('/desktop/api/doc/get?id=1')
    assert_equal(-1, json.loads(r1.content)['status'])
def test_session_secure_cookie():
with tempfile.NamedTemporaryFile() as cert_file:
with tempfile.NamedTemporaryFile() as key_file:
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing(cert_file.name),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(key_file.name),
desktop.conf.SESSION.SECURE.set_for_testing(False),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_false(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing(cert_file.name),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(key_file.name),
desktop.conf.SESSION.SECURE.set_for_testing(True),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_true(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing(cert_file.name),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(key_file.name),
desktop.conf.SESSION.SECURE.set_for_testing(present=False),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_true(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing(present=None),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(present=None),
desktop.conf.SESSION.SECURE.set_for_testing(present=False),
]
try:
assert_false(desktop.conf.is_https_enabled())
assert_false(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
def test_get_data_link():
assert_equal(None, get_data_link({}))
assert_equal('gethue.com', get_data_link({'type': 'link', 'link': 'gethue.com'}))
assert_equal(
'/hbase/#Cluster/document_demo/query/20150527',
get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527'})
)
assert_equal(
'/hbase/#Cluster/document_demo/query/20150527[f1]',
get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527', 'fam': 'f1'})
)
assert_equal(
'/hbase/#Cluster/document_demo/query/20150527[f1:c1]',
get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527', 'fam': 'f1', 'col': 'c1'})
)
assert_equal('/filebrowser/view=/data/hue/1', get_data_link({'type': 'hdfs', 'path': '/data/hue/1'}))
assert_equal('/metastore/table/default/sample_07', get_data_link({'type': 'hive', 'database': 'default', 'table': 'sample_07'}))
def test_get_dn():
assert_equal(['*'], desktop.conf.get_dn(''))
assert_equal(['*'], desktop.conf.get_dn('localhost'))
assert_equal(['*'], desktop.conf.get_dn('localhost.localdomain'))
assert_equal(['*'], desktop.conf.get_dn('hue'))
assert_equal(['*'], desktop.conf.get_dn('hue.com'))
assert_equal(['.hue.com'], desktop.conf.get_dn('sql.hue.com'))
assert_equal(['.hue.com'], desktop.conf.get_dn('finance.sql.hue.com'))
assert_equal(['.hue.com'], desktop.conf.get_dn('bank.finance.sql.hue.com'))
def test_collect_validation_messages_default():
try:
# Generate the spec file
configspec = generate_configspec()
# Load the .ini files
config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
conf = load_confs(configspec.name, _configs_from_dir(config_dir))
# This is for the hue.ini file only
error_list = []
collect_validation_messages(conf, error_list)
assert_equal(len(error_list), 0, error_list)
finally:
os.remove(configspec.name)
def test_collect_validation_messages_extras():
try:
# Generate the spec file
configspec = generate_configspec()
# Load the .ini files
config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
conf = load_confs(configspec.name, _configs_from_dir(config_dir))
test_conf = ConfigObj()
test_conf['extrasection'] = {
'key1': 'value1',
'key2': 'value1'
}
extrasubsection = {
'key1': 'value1',
'key2': 'value1'
}
# Test with extrasections as well as existing subsection, keyvalues in existing section [desktop]
test_conf['desktop'] = {
'extrasubsection': extrasubsection,
'extrakey': 'value1',
'auth': {
'ignore_username_case': 'true',
'extrasubsubsection': {
'extrakey': 'value1'
}
}
}
conf.merge(test_conf)
error_list = []
collect_validation_messages(conf, error_list)
finally:
os.remove(configspec.name)
assert_equal(len(error_list), 1)
assert_equal(u'Extra section, extrasection in the section: top level, Extra keyvalue, extrakey in the section: [desktop] , '
'Extra section, extrasubsection in the section: [desktop] , Extra section, extrasubsubsection in the section: [desktop] [[auth]] ',
error_list[0]['message']
)
# Test db migration from 5.7,...,5.15 to latest
def test_db_migrations_sqlite():
versions = ['5.' + str(i) for i in range(7, 16)]
for version in versions:
name = 'hue_' + version + '_' + uuid.uuid4().hex
file_name = 'hue_' + version + '.db'
path = get_desktop_root('./core/src/desktop/test_data/' + file_name)
DATABASES[name] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path,
'USER': '',
'SCHEMA': 'public',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'OPTIONS': {} if sys.version_info[0] > 2 else '',
'ATOMIC_REQUESTS': True,
'CONN_MAX_AGE': 0,
}
try:
call_command('migrate', '--fake-initial', '--database=' + name)
finally:
del DATABASES[name]
def test_db_migrations_mysql():
if desktop.conf.DATABASE.ENGINE.get().find('mysql') < 0:
raise SkipTest
versions = ['5_' + str(i) for i in range(7, 16)]
  os.environ['PATH'] = os.environ.get('PATH', '') + ':/usr/local/bin'  # os.putenv would not expand $PATH or update os.environ
try:
subprocess.check_output('type mysql', shell=True)
except subprocess.CalledProcessError as e:
LOG.warning('mysql not found')
raise SkipTest
for version in versions:
file_name = 'hue_' + version + '_mysql.sql'
name = 'hue_' + version + '_' + uuid.uuid4().hex
path = get_desktop_root('./core/src/desktop/test_data/' + file_name)
DATABASES[name] = {
'ENGINE': desktop.conf.DATABASE.ENGINE.get(),
'NAME': name,
'USER': desktop.conf.DATABASE.USER.get(),
'SCHEMA': name,
'PASSWORD': desktop.conf.get_database_password(),
'HOST': desktop.conf.DATABASE.HOST.get(),
'PORT': str(desktop.conf.DATABASE.PORT.get()),
'OPTIONS': force_dict_to_strings(desktop.conf.DATABASE.OPTIONS.get()),
'ATOMIC_REQUESTS': True,
'PATH': path,
'CONN_MAX_AGE': desktop.conf.DATABASE.CONN_MAX_AGE.get(),
}
try:
subprocess.check_output(
'mysql -u%(USER)s -p%(PASSWORD)s -e "CREATE DATABASE %(SCHEMA)s"' % DATABASES[name], stderr=subprocess.STDOUT, shell=True
) # No way to run this command with django
subprocess.check_output(
'mysql -u%(USER)s -p%(PASSWORD)s %(SCHEMA)s < %(PATH)s' % DATABASES[name], stderr=subprocess.STDOUT, shell=True
)
call_command('migrate', '--fake-initial', '--database=%(SCHEMA)s' % DATABASES[name])
except subprocess.CalledProcessError as e:
LOG.warning('stderr: {}'.format(e.output))
raise e
finally:
del DATABASES[name]
@raises(ImportError)
def test_forbidden_libs():
if sys.version_info[0] > 2:
raise SkipTest
import chardet # chardet license (LGPL) is not compatible and should not be bundled
class TestGetConfigErrors():
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="empty", recreate=True, is_superuser=False)
self.user = User.objects.get(username="test")
def test_get_config_errors_unicode(self):
"""
Avoid a Python 2 issue:
AttributeError: 'unicode' object has no attribute 'get_fully_qualifying_key'
"""
request = Mock(user=self.user)
with patch('desktop.views.appmanager') as appmanager:
appmanager.DESKTOP_MODULES = [
Mock(
conf=Mock(
config_validator=lambda user: [(u'Connector 1', 'errored because of ...')]
)
)
]
assert_equal(
[{'name': 'Connector 1', 'message': 'errored because of ...'}],
_get_config_errors(request, cache=False)
)
| {
"content_hash": "200ffb24ceacb19ecff3c689c9cbcdb0",
"timestamp": "",
"source": "github",
"line_count": 1569,
"max_line_length": 137,
"avg_line_length": 36.55066921606119,
"alnum_prop": 0.6699100230173677,
"repo_name": "kawamon/hue",
"id": "4ff80a2050f910db3b8b058693d14c0590eed468",
"size": "58174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/src/desktop/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'camera.py'
copyright = u'2014, Matej Smid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'camerapydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'camerapy.tex', u'camera.py Documentation',
u'Matej Smid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'camerapy', u'camera.py Documentation',
[u'Matej Smid'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'camerapy', u'camera.py Documentation',
u'Matej Smid', 'camerapy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "4d611b0672aefb49e0540766ea343056",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.10480349344978,
"alnum_prop": 0.7007616974972797,
"repo_name": "smidm/camera.py",
"id": "34874b05303800935daa32708e9f9528119161c2",
"size": "7772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23839"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
} |
import copy
import os
import sys
import time
import unittest
rootDirectory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if rootDirectory not in sys.path:
sys.path.insert(0, rootDirectory)
from oxford.vision import Vision
class TestVision(unittest.TestCase):
'''Tests the Project Oxford Vision API'''
@classmethod
def setUpClass(cls):
# set up self.client for tests
cls.client = Vision(os.environ['OXFORD_VISION_API_KEY'])
cls.localFilePrefix = os.path.join(rootDirectory, 'tests', 'images')
cls.analyzeOptions = {
'ImageType': True,
'Color': True,
'Faces': True,
'Adult': True,
'Categories': True,
'Tags': True,
'Description': True,
'Celebrities': True,
}
cls.thumbnailOptions = {
'width': 100,
'height': 100,
'smartCropping': True
}
cls.ocrOptions = {
'language': 'en',
'detectOrientation': True
}
#
# test the analyze API
#
def _verify_analyze_result(self, result):
self.assertIsNotNone(result['imageType'])
self.assertIsNotNone(result['color'])
self.assertIsNotNone(result['faces'])
self.assertIsNotNone(result['adult'])
self.assertIsNotNone(result['categories'])
def test_vision_analyze_file(self):
options = copy.copy(self.analyzeOptions)
options['path'] = os.path.join(self.localFilePrefix, 'vision.jpg')
result = self.client.analyze(options)
self._verify_analyze_result(result)
def test_vision_analyze_url(self):
options = copy.copy(self.analyzeOptions)
options['url'] = 'https://upload.wikimedia.org/wikipedia/commons/1/19/Bill_Gates_June_2015.jpg'
result = self.client.analyze(options)
self._verify_analyze_result(result)
def test_vision_analyze_stream(self):
options = copy.copy(self.analyzeOptions)
with open(os.path.join(self.localFilePrefix, 'face1.jpg'), 'rb') as file:
options['stream'] = file.read()
result = self.client.analyze(options)
self._verify_analyze_result(result)
#
# test the thumbnail API
#
def _verify_thumbnail_result(self, result, fileName):
outputPath = os.path.join(self.localFilePrefix, fileName)
with open(outputPath, 'wb+') as file: file.write(result)
self.assertTrue(True, 'file write succeeded for: {0}'.format(fileName))
def test_vision_thumbnail_file(self):
options = copy.copy(self.thumbnailOptions)
options['path'] = os.path.join(self.localFilePrefix, 'vision.jpg')
result = self.client.thumbnail(options)
self._verify_thumbnail_result(result, 'thumbnail_from_file.jpg')
def test_vision_thumbnail_url(self):
options = copy.copy(self.thumbnailOptions)
options['url'] = 'https://upload.wikimedia.org/wikipedia/commons/1/19/Bill_Gates_June_2015.jpg'
result = self.client.thumbnail(options)
self._verify_thumbnail_result(result, 'thumbnail_from_url.jpg')
def test_vision_thumbnail_stream(self):
options = copy.copy(self.thumbnailOptions)
with open(os.path.join(self.localFilePrefix, 'face1.jpg'), 'rb') as file:
options['stream'] = file.read()
result = self.client.thumbnail(options)
self._verify_thumbnail_result(result, 'thumbnail_from_stream.jpg')
#
# test the OCR API
#
def _verify_ocr_result(self, result):
self.assertIsNotNone(result['language'])
self.assertIsNotNone(result['orientation'])
def test_vision_ocr_file(self):
options = copy.copy(self.ocrOptions)
options['path'] = os.path.join(self.localFilePrefix, 'vision.jpg')
result = self.client.ocr(options)
self._verify_ocr_result(result)
def test_vision_ocr_url(self):
options = copy.copy(self.ocrOptions)
options['url'] = 'https://upload.wikimedia.org/wikipedia/commons/1/19/Bill_Gates_June_2015.jpg'
result = self.client.ocr(options)
self._verify_ocr_result(result)
def test_vision_ocr_stream(self):
options = copy.copy(self.ocrOptions)
with open(os.path.join(self.localFilePrefix, 'face1.jpg'), 'rb') as file:
options['stream'] = file.read()
result = self.client.ocr(options)
self._verify_ocr_result(result)
@classmethod
    def tearDownClass(cls):
time.sleep(0.5) # sleep time in seconds
| {
"content_hash": "dc5f0fd3e81a3b359c2b75c2c73ae8ac",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 103,
"avg_line_length": 35.68217054263566,
"alnum_prop": 0.6269824027807951,
"repo_name": "chidochipotle/oxford",
"id": "ae57d96cbeb818dbb55697d793d94cfcd01cb783",
"size": "4603",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_vision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94101"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
from zerver.models import get_client, UserProfile
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict
from zerver.decorator import authenticated_api_view, REQ, has_request_variables, to_non_negative_int, flexible_boolean
from zerver.views.messages import send_message_backend
from zerver.lib.webhooks.git import get_push_commits_event_message,\
SUBJECT_WITH_BRANCH_TEMPLATE, get_force_push_commits_event_message, \
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message
import logging
import re
import ujson
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Text
from zerver.lib.str_utils import force_str
from django.http import HttpRequest, HttpResponse
ZULIP_TEST_REPO_NAME = 'zulip-test'
ZULIP_TEST_REPO_ID = 6893087
def is_test_repository(repository):
# type: (Mapping[Text, Any]) -> bool
return repository['name'] == ZULIP_TEST_REPO_NAME and repository['id'] == ZULIP_TEST_REPO_ID
class UnknownEventType(Exception):
pass
def github_pull_request_content(payload):
# type: (Mapping[Text, Any]) -> Text
pull_request = payload['pull_request']
action = get_pull_request_or_issue_action(payload)
if action in ('opened', 'edited'):
return get_pull_request_event_message(
payload['sender']['login'],
action,
pull_request['html_url'],
pull_request['number'],
pull_request['head']['ref'],
pull_request['base']['ref'],
pull_request['body'],
get_pull_request_or_issue_assignee(pull_request)
)
return get_pull_request_event_message(
payload['sender']['login'],
action,
pull_request['html_url'],
pull_request['number']
)
def github_issues_content(payload):
# type: (Mapping[Text, Any]) -> Text
issue = payload['issue']
action = get_pull_request_or_issue_action(payload)
if action in ('opened', 'edited'):
return get_issue_event_message(
payload['sender']['login'],
action,
issue['html_url'],
issue['number'],
issue['body'],
get_pull_request_or_issue_assignee(issue)
)
return get_issue_event_message(
payload['sender']['login'],
action,
issue['html_url'],
issue['number'],
)
def github_object_commented_content(payload, type):
# type: (Mapping[Text, Any], Text) -> Text
comment = payload['comment']
issue = payload['issue']
action = u'[commented]({}) on'.format(comment['html_url'])
return get_pull_request_event_message(
comment['user']['login'],
action,
issue['html_url'],
issue['number'],
message=comment['body'],
type=type
)
def get_pull_request_or_issue_action(payload):
# type: (Mapping[Text, Any]) -> Text
return 'synchronized' if payload['action'] == 'synchronize' else payload['action']
def get_pull_request_or_issue_assignee(object_payload):
# type: (Mapping[Text, Any]) -> Optional[Text]
assignee_dict = object_payload.get('assignee')
if assignee_dict:
return assignee_dict.get('login')
return None
def get_pull_request_or_issue_subject(repository, payload_object, type):
# type: (Mapping[Text, Any], Mapping[Text, Any], Text) -> Text
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=repository['name'],
type=type,
id=payload_object['number'],
title=payload_object['title']
)
def github_generic_subject(noun, topic_focus, blob):
# type: (Text, Text, Mapping[Text, Any]) -> Text
# issue and pull_request objects have the same fields we're interested in
return u'%s: %s %d: %s' % (topic_focus, noun, blob['number'], blob['title'])
def api_github_v1(user_profile, event, payload, branches, stream, **kwargs):
# type: (UserProfile, Text, Mapping[Text, Any], Text, Text, **Any) -> Tuple[Text, Text, Text]
"""
processes github payload with version 1 field specification
`payload` comes in unmodified from github
`stream` is set to 'commits' if otherwise unset
"""
commit_stream = stream
issue_stream = 'issues'
return api_github_v2(user_profile, event, payload, branches, stream, commit_stream, issue_stream, **kwargs)
def api_github_v2(user_profile, event, payload, branches, default_stream,
commit_stream, issue_stream, topic_focus = None):
# type: (UserProfile, Text, Mapping[Text, Any], Text, Text, Text, Text, Optional[Text]) -> Tuple[Text, Text, Text]
"""
processes github payload with version 2 field specification
`payload` comes in unmodified from github
`default_stream` is set to what `stream` is in v1 above
`commit_stream` and `issue_stream` fall back to `default_stream` if they are empty
    This, together with allowing alternative endpoints, is what distinguishes v1 from v2 of the github configuration
"""
target_stream = commit_stream if commit_stream else default_stream
issue_stream = issue_stream if issue_stream else default_stream
repository = payload['repository']
updated_topic_focus = topic_focus if topic_focus else repository['name']
# Event Handlers
if event == 'pull_request':
subject = get_pull_request_or_issue_subject(repository, payload['pull_request'], 'PR')
content = github_pull_request_content(payload)
elif event == 'issues':
# in v1, we assume that this stream exists since it is
# deprecated and the few realms that use it already have the
# stream
target_stream = issue_stream
subject = get_pull_request_or_issue_subject(repository, payload['issue'], 'Issue')
content = github_issues_content(payload)
elif event == 'issue_comment':
# Comments on both issues and pull requests come in as issue_comment events
issue = payload['issue']
if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None:
# It's an issues comment
target_stream = issue_stream
type = 'Issue'
subject = get_pull_request_or_issue_subject(repository, payload['issue'], type)
else:
# It's a pull request comment
type = 'PR'
subject = get_pull_request_or_issue_subject(repository, payload['issue'], type)
content = github_object_commented_content(payload, type)
elif event == 'push':
subject, content = build_message_from_gitlog(user_profile, updated_topic_focus,
payload['ref'], payload['commits'],
payload['before'], payload['after'],
payload['compare'],
payload['pusher']['name'],
forced=payload['forced'],
created=payload['created'],
deleted=payload['deleted'])
elif event == 'commit_comment':
subject = updated_topic_focus
comment = payload['comment']
action = u'[commented]({})'.format(comment['html_url'])
content = get_commits_comment_action_message(
comment['user']['login'],
action,
comment['html_url'].split('#', 1)[0],
comment['commit_id'],
comment['body'],
)
else:
raise UnknownEventType(force_str(u'Event %s is unknown and cannot be handled' % (event,)))
return target_stream, subject, content
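# Hedged usage sketch added for illustration; this helper is hypothetical, is
# not called by Zulip, and `payload` is assumed to be a full GitHub push
# payload as received by api_github_landing below. It only demonstrates the
# fallback described in the docstring above: empty commit_stream/issue_stream
# fall back to default_stream, so a push with no commit stream configured
# still lands on the default stream.
def _example_api_github_v2_stream_fallback(user_profile, payload):
    # type: (UserProfile, Mapping[Text, Any]) -> Tuple[Text, Text, Text]
    return api_github_v2(user_profile, 'push', payload, branches='',
                         default_stream='commits', commit_stream='',
                         issue_stream='')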
@authenticated_api_view(is_webhook=True)
@has_request_variables
def api_github_landing(request, user_profile, event=REQ(),
payload=REQ(validator=check_dict([])),
branches=REQ(default=''),
stream=REQ(default=''),
version=REQ(converter=to_non_negative_int, default=1),
commit_stream=REQ(default=''),
issue_stream=REQ(default=''),
exclude_pull_requests=REQ(converter=flexible_boolean, default=False),
exclude_issues=REQ(converter=flexible_boolean, default=False),
exclude_commits=REQ(converter=flexible_boolean, default=False),
emphasize_branch_in_topic=REQ(converter=flexible_boolean, default=False),
):
# type: (HttpRequest, UserProfile, Text, Mapping[Text, Any], Text, Text, int, Text, Text, bool, bool, bool, bool) -> HttpResponse
repository = payload['repository']
# Special hook for capturing event data. If we see our special test repo, log the payload from github.
try:
if is_test_repository(repository) and settings.PRODUCTION:
with open('/var/log/zulip/github-payloads', 'a') as f:
f.write(ujson.dumps({'event': event,
'payload': payload,
'branches': branches,
'stream': stream,
'version': version,
'commit_stream': commit_stream,
'issue_stream': issue_stream,
'exclude_pull_requests': exclude_pull_requests,
'exclude_issues': exclude_issues,
'exclude_commits': exclude_commits,
'emphasize_branch_in_topic': emphasize_branch_in_topic,
}))
f.write('\n')
except Exception:
logging.exception('Error while capturing Github event')
if not stream:
stream = 'commits'
short_ref = re.sub(r'^refs/heads/', '', payload.get('ref', ''))
kwargs = dict()
if emphasize_branch_in_topic and short_ref:
kwargs['topic_focus'] = short_ref
allowed_events = set()
if not exclude_pull_requests:
allowed_events.add('pull_request')
if not exclude_issues:
allowed_events.add('issues')
allowed_events.add('issue_comment')
if not exclude_commits:
allowed_events.add('push')
allowed_events.add('commit_comment')
if event not in allowed_events:
return json_success()
# We filter issue_comment events for issue creation events
if event == 'issue_comment' and payload['action'] != 'created':
return json_success()
if event == 'push':
# If we are given a whitelist of branches, then we silently ignore
# any push notification on a branch that is not in our whitelist.
        if branches and short_ref not in re.split(r'[\s,;|]+', branches):
return json_success()
# Map payload to the handler with the right version
if version == 2:
target_stream, subject, content = api_github_v2(user_profile, event, payload, branches,
stream, commit_stream, issue_stream,
**kwargs)
else:
target_stream, subject, content = api_github_v1(user_profile, event, payload, branches,
stream, **kwargs)
request.client = get_client('ZulipGitHubWebhook')
return send_message_backend(request, user_profile,
message_type_name='stream',
message_to=[target_stream],
forged=False, subject_name=subject,
message_content=content)
def build_message_from_gitlog(user_profile, name, ref, commits, before, after, url, pusher, forced=None, created=None, deleted=False):
# type: (UserProfile, Text, Text, List[Dict[str, str]], Text, Text, Text, Text, Optional[Text], Optional[Text], Optional[bool]) -> Tuple[Text, Text]
short_ref = re.sub(r'^refs/heads/', '', ref)
subject = SUBJECT_WITH_BRANCH_TEMPLATE.format(repo=name, branch=short_ref)
if re.match(r'^0+$', after):
content = get_remove_branch_event_message(pusher, short_ref)
# 'created' and 'forced' are github flags; the second check is for beanstalk
elif (forced and not created) or (forced is None and len(commits) == 0):
content = get_force_push_commits_event_message(pusher, url, short_ref, after[:7])
else:
commits = _transform_commits_list_to_common_format(commits)
content = get_push_commits_event_message(pusher, url, short_ref, commits, deleted=deleted)
return subject, content
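# Hedged sketch added for illustration; the helper is hypothetical and the
# repository name, SHAs and compare URL are invented. It shows the force-push
# path above: with forced=True and created=False, the content is rendered via
# get_force_push_commits_event_message using the 7-character short SHA, while
# the subject carries the short branch name taken from the ref.
def _example_force_push_message(user_profile):
    # type: (UserProfile) -> Tuple[Text, Text]
    return build_message_from_gitlog(
        user_profile, 'zulip-test', 'refs/heads/master', [],
        before='a' * 40, after='b' * 40,
        url='https://github.com/zulip/zulip-test/compare/aaaaaaa...bbbbbbb',
        pusher='octocat', forced=True, created=False)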
def _transform_commits_list_to_common_format(commits):
# type: (List[Dict[str, Any]]) -> List[Dict[str, str]]
new_commits_list = []
for commit in commits:
new_commits_list.append({
'name': commit['author'].get('username'),
'sha': commit.get('id'),
'url': commit.get('url'),
'message': commit.get('message'),
})
return new_commits_list
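# Hedged worked example of the reshaping done above (field values invented):
# a GitHub-style commit entry such as
#     {'id': 'abc1234', 'url': 'https://example.test/commit/abc1234',
#      'message': 'Fix bug', 'author': {'username': 'octocat'}}
# becomes the common format consumed by the shared git webhook helpers:
#     {'sha': 'abc1234', 'url': 'https://example.test/commit/abc1234',
#      'message': 'Fix bug', 'name': 'octocat'}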
| {
"content_hash": "170efb52dc729f5787c0be447986dd96",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 152,
"avg_line_length": 43.34304207119741,
"alnum_prop": 0.5922496826700515,
"repo_name": "vaidap/zulip",
"id": "c8323a25cb2fbb5d4c7ac37b91b6310e4e22c57e",
"size": "13393",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "zerver/webhooks/github/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "416449"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "472724"
},
{
"name": "JavaScript",
"bytes": "2123247"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "84574"
},
{
"name": "Python",
"bytes": "3669105"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "44486"
}
],
"symlink_target": ""
} |
from distutils.core import setup
import googstyle
setup(
name='googstyle',
version=googstyle.__version__,
description="CSS and images extracted from Closure Library",
url="https://github.com/ludios/Googstyle",
author="Ivan Kozik",
author_email="[email protected]",
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: Apache Software License',
],
packages=['googstyle'],
package_data={'googstyle': ['goog-images/*']},
)
| {
"content_hash": "ee86b934ca48f67867c13fe7dfd2268f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 27.095238095238095,
"alnum_prop": 0.6994727592267135,
"repo_name": "ludios/Googstyle",
"id": "d4f2932177ca08758b5c76ad12da64e78233f64e",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "616"
}
],
"symlink_target": ""
} |
import constants
from euctwfreq import EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO
from euckrfreq import EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO
from gb2312freq import GB2312CharToFreqOrder, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO
from big5freq import Big5CharToFreqOrder, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO
from jisfreq import JISCharToFreqOrder, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
class CharDistributionAnalysis:
def __init__(self):
self._mCharToFreqOrder = None # Mapping table to get frequency order from char order (get from GetOrder())
self._mTableSize = None # Size of above table
self._mTypicalDistributionRatio = None # This is a constant value which varies from language to language, used in calculating confidence. See http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html for further detail.
self.reset()
def reset(self):
"""reset analyser, clear any state"""
self._mDone = constants.False # If this flag is set to constants.True, detection is done and conclusion has been made
self._mTotalChars = 0 # Total characters encountered
self._mFreqChars = 0 # The number of characters whose frequency order is less than 512
def feed(self, aStr, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aStr)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range, return negative answer
if self._mTotalChars <= 0:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio)
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
        # It is not necessary to receive all of the data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aStr):
# We do not handle characters based on the original encoding string, but
# convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency table.
return -1
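# Hedged usage sketch added for illustration and not part of the bundled
# chardet; the helper name is hypothetical. A concrete subclass
# (EUCKRDistributionAnalysis is defined below) is fed one 2-byte character at
# a time; the confidence reported is
#     freq_chars / ((total_chars - freq_chars) * typical_distribution_ratio)
# capped at SURE_YES, so text dominated by high-frequency characters of the
# candidate encoding converges towards 0.99.
def _example_distribution_confidence(aBuf):
    """Sketch only: aBuf is assumed to hold an even number of EUC-KR bytes."""
    analyser = EUCKRDistributionAnalysis()
    for i in range(0, len(aBuf) - 1, 2):
        analyser.feed(aBuf[i:i + 2], 2)
    return analyser.get_confidence()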
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aStr[0] >= '\xC4':
return 94 * (ord(aStr[0]) - 0xC4) + ord(aStr[1]) - 0xA1
else:
return -1
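# Hedged worked example of the arithmetic above (bytes invented): for the pair
# '\xC5\xA3', order = 94 * (0xC5 - 0xC4) + (0xA3 - 0xA1) = 94 + 2 = 96, i.e.
# each lead byte starting at 0xC4 contributes a row of 94 second-byte
# positions (0xA1..0xFE). EUC-KR, GB2312 and EUC-JP below use the same
# 94-column layout with their own lead-byte offsets.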
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aStr[0] >= '\xB0':
return 94 * (ord(aStr[0]) - 0xB0) + ord(aStr[1]) - 0xA1
else:
            return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if (aStr[0] >= '\xB0') and (aStr[1] >= '\xA1'):
return 94 * (ord(aStr[0]) - 0xB0) + ord(aStr[1]) - 0xA1
else:
            return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aStr[0] >= '\xA4':
if aStr[1] >= '\xA1':
return 157 * (ord(aStr[0]) - 0xA4) + ord(aStr[1]) - 0xA1 + 63
else:
return 157 * (ord(aStr[0]) - 0xA4) + ord(aStr[1]) - 0x40
else:
return -1
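# Hedged worked example of the Big5 arithmetic above (bytes invented): the
# second byte has two ranges, 0x40..0x7E (63 positions) and 0xA1..0xFE
# (94 positions), hence the row width of 157 and the +63 offset for the upper
# range. For '\xA5\x41': 157 * (0xA5 - 0xA4) + (0x41 - 0x40) = 158; for
# '\xA5\xA2': 157 * 1 + (0xA2 - 0xA1) + 63 = 221.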
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
if (aStr[0] >= '\x81') and (aStr[0] <= '\x9F'):
order = 188 * (ord(aStr[0]) - 0x81)
elif (aStr[0] >= '\xE0') and (aStr[0] <= '\xEF'):
order = 188 * (ord(aStr[0]) - 0xE0 + 31)
else:
            return -1
order = order + ord(aStr[1]) - 0x40
if aStr[1] > '\x7F':
            order = -1
return order
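# Hedged worked example of the Shift_JIS arithmetic above (bytes invented):
# lead bytes 0x81..0x9F and 0xE0..0xEF map to consecutive rows of width 188,
# so for '\x82\x41' order = 188 * (0x82 - 0x81) + (0x41 - 0x40) = 189, while a
# second byte above 0x7F resets the order to -1 and is treated as out of range
# by feed() above.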
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aStr):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aStr[0] >= '\xA0':
return 94 * (ord(aStr[0]) - 0xA1) + ord(aStr[1]) - 0xa1
else:
return -1
| {
"content_hash": "8f7fafffba622b8503d880c79b72226b",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 238,
"avg_line_length": 42.80346820809248,
"alnum_prop": 0.6272788656313302,
"repo_name": "JeyZeta/Dangerous",
"id": "1f95fc84828eceb0f38fb1464f359922781f0e28",
"size": "8590",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/thirdparty/chardet/chardistribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
""" Creating correlation
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create
import create_correlation_steps as correlation_create
class TestCorrelation(object):
def setup(self):
"""
Debug information
"""
print "\n-------------------\nTests in: %s\n" % __name__
def teardown(self):
"""
Debug information
"""
print "\nEnd of tests in: %s\n-------------------\n" % __name__
def test_scenario1(self):
"""
Scenario: Successfully creating a correlation from a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a correlation from a dataset
And I wait until the correlation is ready less than <time_3> secs
And I update the correlation name to "<correlation_name>"
When I wait until the correlation is ready less than <time_4> secs
Then the correlation name is "<correlation_name>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | correlation_name |
| ../data/iris.csv | 10 | 10 | 10 | 10 | my new correlation name |
"""
print self.test_scenario1.__doc__
examples = [
['data/iris.csv', '10', '10', '10', '10', 'my new correlation name']]
for example in examples:
print "\nTesting with:\n", example
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
correlation_create.i_create_a_correlation_from_dataset(self)
correlation_create.the_correlation_is_finished_in_less_than(self, example[3])
correlation_create.i_update_correlation_name(self, example[5])
correlation_create.the_correlation_is_finished_in_less_than(self, example[4])
correlation_create.i_check_correlation_name(self, example[5])
| {
"content_hash": "74e5ceaf9c381fc9855083f2b3881d29",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 95,
"avg_line_length": 45.60377358490566,
"alnum_prop": 0.5829540752999586,
"repo_name": "xaowoodenfish/python-1",
"id": "886b0d61db78db7ce34d784f55dab970e36bd75a",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "bigml/tests/test_25_correlation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "691861"
}
],
"symlink_target": ""
} |
"""OpenTherm Gateway config flow."""
import asyncio
import pyotgw
from pyotgw import vars as gw_vars
from serial import SerialException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
from .const import (
CONF_FLOOR_TEMP,
CONF_READ_PRECISION,
CONF_SET_PRECISION,
CONF_TEMPORARY_OVRD_MODE,
)
class OpenThermGwConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""OpenTherm Gateway Config Flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OpenThermGwOptionsFlow(config_entry)
async def async_step_init(self, info=None):
"""Handle config flow initiation."""
if info:
name = info[CONF_NAME]
device = info[CONF_DEVICE]
gw_id = cv.slugify(info.get(CONF_ID, name))
entries = [e.data for e in self._async_current_entries()]
if gw_id in [e[CONF_ID] for e in entries]:
return self._show_form({"base": "id_exists"})
if device in [e[CONF_DEVICE] for e in entries]:
return self._show_form({"base": "already_configured"})
async def test_connection():
"""Try to connect to the OpenTherm Gateway."""
otgw = pyotgw.pyotgw()
status = await otgw.connect(self.hass.loop, device)
await otgw.disconnect()
return status[gw_vars.OTGW].get(gw_vars.OTGW_ABOUT)
try:
res = await asyncio.wait_for(test_connection(), timeout=10)
except (asyncio.TimeoutError, SerialException):
return self._show_form({"base": "cannot_connect"})
if res:
return self._create_entry(gw_id, name, device)
return self._show_form()
async def async_step_user(self, user_input=None):
"""Handle manual initiation of the config flow."""
return await self.async_step_init(user_input)
async def async_step_import(self, import_config):
"""
Import an OpenTherm Gateway device as a config entry.
This flow is triggered by `async_setup` for configured devices.
"""
formatted_config = {
CONF_NAME: import_config.get(CONF_NAME, import_config[CONF_ID]),
CONF_DEVICE: import_config[CONF_DEVICE],
CONF_ID: import_config[CONF_ID],
}
return await self.async_step_init(info=formatted_config)
def _show_form(self, errors=None):
"""Show the config flow form with possible errors."""
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): str,
vol.Required(CONF_DEVICE): str,
vol.Optional(CONF_ID): str,
}
),
errors=errors or {},
)
def _create_entry(self, gw_id, name, device):
"""Create entry for the OpenTherm Gateway device."""
return self.async_create_entry(
title=name, data={CONF_ID: gw_id, CONF_DEVICE: device, CONF_NAME: name}
)
class OpenThermGwOptionsFlow(config_entries.OptionsFlow):
"""Handle opentherm_gw options."""
def __init__(self, config_entry):
"""Initialize the options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the opentherm_gw options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_READ_PRECISION,
default=self.config_entry.options.get(CONF_READ_PRECISION, 0),
): vol.All(
vol.Coerce(float),
vol.In(
[0, PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
),
vol.Optional(
CONF_SET_PRECISION,
default=self.config_entry.options.get(CONF_SET_PRECISION, 0),
): vol.All(
vol.Coerce(float),
vol.In(
[0, PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
),
vol.Optional(
CONF_TEMPORARY_OVRD_MODE,
default=self.config_entry.options.get(
CONF_TEMPORARY_OVRD_MODE, True
),
): bool,
vol.Optional(
CONF_FLOOR_TEMP,
default=self.config_entry.options.get(CONF_FLOOR_TEMP, False),
): bool,
}
),
)
| {
"content_hash": "9b97af572fe1a3e1db0b8009fc4ff7f9",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 86,
"avg_line_length": 33.99363057324841,
"alnum_prop": 0.5366310661420274,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "7c3bc8f8f6b538b33b6e4e2d1dafcad35d7a9d30",
"size": "5337",
"binary": false,
"copies": "11",
"ref": "refs/heads/dev",
"path": "homeassistant/components/opentherm_gw/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
""" Tests for the utils module """
from __future__ import unicode_literals
from soco.utils import deprecated
# Deprecation decorator
def test_deprecation(recwarn):
@deprecated('0.7')
def dummy(args):
"""My docs"""
pass
@deprecated('0.8', 'better_function', '0.12')
def dummy2(args):
"""My docs"""
pass
assert dummy.__doc__ == "My docs\n\n .. deprecated:: 0.7\n"
assert dummy2.__doc__ == "My docs\n\n .. deprecated:: 0.8\n\n"\
" Will be removed in version 0.12.\n" \
" Use better_function instead."
dummy(3)
w = recwarn.pop()
assert str(w.message) == 'Call to deprecated function dummy.'
dummy2(4)
w = recwarn.pop()
assert str(w.message) == "Call to deprecated function dummy2. Will be " \
"removed in version 0.12. Use " \
"better_function instead."
assert w.filename
assert w.lineno
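# For reference, a minimal sketch of how a decorator with this behaviour could be
# implemented (illustration only -- this is NOT SoCo's actual implementation, which
# lives in soco.utils):
import functools
import warnings


def deprecated_sketch(since, alternative=None, will_be_removed_in=None):
    """Return a decorator that warns on call and annotates the docstring."""
    def decorator(func):
        message = 'Call to deprecated function {0}.'.format(func.__name__)
        if will_be_removed_in is not None:
            message += ' Will be removed in version {0}.'.format(will_be_removed_in)
        if alternative is not None:
            message += ' Use {0} instead.'.format(alternative)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(message, stacklevel=2)
            return func(*args, **kwargs)

        wrapper.__doc__ = (func.__doc__ or '') + '\n\n  .. deprecated:: {0}\n'.format(since)
        return wrapper
    return decorator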
| {
"content_hash": "ffef88cecb712ee8224b4d7e0e175d3a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 27.243243243243242,
"alnum_prop": 0.5357142857142857,
"repo_name": "intfrr/SoCo",
"id": "51e7eb5d97a190ecfe59bb857aadb362c57ffbc2",
"size": "1032",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "Makefile",
"bytes": "66"
},
{
"name": "Python",
"bytes": "462657"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
"""Meta-estimators for building composite models with transformers
In addition to its current contents, this module will eventually be home to
refurbished versions of Pipeline and FeatureUnion.
"""
from ._column_transformer import ColumnTransformer, make_column_transformer
from ._target import TransformedTargetRegressor
__all__ = [
'ColumnTransformer',
'make_column_transformer',
'TransformedTargetRegressor',
]
| {
"content_hash": "b545cb44b3eaf8d7a485e1d5bfe3529d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 26.9375,
"alnum_prop": 0.7795823665893271,
"repo_name": "chrsrds/scikit-learn",
"id": "1cfd53c50d6829c0e7cb6aa993f71d4b9f124895",
"size": "431",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "sklearn/compose/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5255814"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
class Function(object):
def sample(self, n):
raise NotImplementedError
def __call__(self, S):
raise NotImplementedError
@property
def parameters(self):
raise NotImplementedError
def gradient(self, S):
"""Gradients of the log-likelihood wrt the parameters."""
raise NotImplementedError
def project_parameters(self):
raise NotImplementedError
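# A hypothetical toy subclass (illustration only, not part of the original code),
# showing how the interface above is meant to be used: a modular (additive) set
# function with one weight per item.
import numpy as np


class ModularFunction(Function):
    def __init__(self, weights):
        self.weights = np.asarray(weights, dtype=float)

    def __call__(self, S):
        # The value of a modular function is the sum of the weights of the items in S.
        return self.weights[list(S)].sum()

    @property
    def parameters(self):
        return self.weights

    def gradient(self, S):
        # Gradient of the value (standing in for the log-likelihood here)
        # wrt each weight: 1 for items in S, 0 otherwise.
        grad = np.zeros_like(self.weights)
        grad[list(S)] = 1.0
        return grad

    def project_parameters(self):
        # Keep the weights non-negative, a common constraint for such models.
        np.clip(self.weights, 0.0, None, out=self.weights)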
| {
"content_hash": "993895ffe1dc1e1d0624f5ee6b26f69d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 23.22222222222222,
"alnum_prop": 0.65311004784689,
"repo_name": "net-titech/VidSum",
"id": "a901fb23888819084a2ac51ee8130e6cbff44b0d",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/packages/aistats-flid/code/functions/function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "371"
},
{
"name": "C++",
"bytes": "8002"
},
{
"name": "Python",
"bytes": "51560"
},
{
"name": "TeX",
"bytes": "309506"
}
],
"symlink_target": ""
} |
import string
import glob
import time
import xml.etree.ElementTree as ET
from itertools import chain
# Import reader
import xlrd
import csv
import requests
# Import data handlers
import collections
# Import Network Analysis Tools
import networkx as nx
import igraph as ig
# Import language processing tools
from gensim import corpora, models
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer as ES
def main():
"""
Runs a standard analysis.
    Put pdf files in a 'files' subfolder in the working
    directory, and run the script.
"""
depth = "paragraph"
convert_pdfs()
xmls = get_filelist("files", "xml")
docs = []
for xml in xmls:
try:
docs.append(ET.ElementTree(file=xml))
except Exception, e:
print e, xml
continue
print "%s documents are in the corpus." %str(len(docs))
#docs = [ET.ElementTree(file=xml) for xml in xmls]
texts = [[p.text for p in doc.getroot().findall(".//*[@class='DoCO:TextChunk']")
if p.text != None]
for doc in docs]
perform_analysis("isis", content = texts,
model="lsa", depth=depth, num_topics=110,
show_topics = 20, num_words=20, threshold=0)
perform_analysis("isis", content = texts,
model="lda", depth=depth, num_topics = 20,
show_topics = 20, num_words=10)
def convert_pdfs():
"""
Converts pdfs to xml via
https://gist.github.com/yoavram/4351598
and http://pdfx.cs.man.ac.uk
It looks for unconverted pdfs.
"""
pdfs = get_filelist("files", "pdf")
    # use slicing rather than rstrip(): rstrip strips a set of characters, not a suffix
    pdfs = set([f[:-len(".pdf")].replace(" ", "") for f in pdfs])
    xmls = get_filelist("files", "xml")
    xmls = set([f[:-len(".xml")] for f in xmls])
filelist = pdfs - xmls
for pdf in filelist:
pypdfx(pdf)
def perform_analysis(keyword, content=None, testdata = None,
model="lsa", depth="document", num_topics = 20,
show_topics = 20, num_words = 20, threshold=0):
"""
Workflow for topic analysis.
    Looks for an earlier dictionary and corpus; if none are found, creates them
    from the provided documents.
Creates either LSA or LDA model and evaluates it.
Output: nodes and edges csv for gephi, a topic csv and
a network visualization.
"""
try:
dictionary, corpus = load_dictionary(keyword, depth)
except Exception, e:
dictionary, corpus = preprocess_content(content, keyword, depth)
print "\nBeginning with analysis at %s." % time.ctime()
if model is "lsa":
_model = create_lsi_model(dictionary, corpus, num_topics)
if model is "lda":
_model = create_lda_model(dictionary, corpus, num_topics)
testdata = load_reference_texts(model)
evaluate_model(keyword, testdata, model, _model, num_words, threshold, depth)
#test_for_topic_convergence(keyword, testdata, model, _model, num_topics, threshold, depth)
export_matrix(keyword, dictionary, model, _model, show_topics, num_words, depth)
export_topic_list(keyword, dictionary, model, _model, show_topics, num_words, depth)
export_word_graph(keyword, dictionary, model, _model, show_topics, num_words, threshold, depth)
def get_filelist(path, extension):
"""
Creates a list of files in a folder with a given extension.
Navigate to this folder first.
"""
return [f for f in glob.glob("{0}/*.{1}".format(path, extension))]
def preprocess_content(content, keyword, depth="document"):
"""
Takes a list of documents, removes non-alphabetical characters,
removes a list of stopwords, performs stemming and creates
a dictionary and a corpus for this set of documents for re-use.
"""
print "\nBeginning with preprocessing at %s." % time.ctime()
if depth is "document":
if type(content[0]) is list:
documents = [" ".join(text) for text in content]
else:
documents = content
if depth is "paragraph":
documents = list(chain.from_iterable(content))
if depth is "sentence":
documents = list(chain.from_iterable(["".join(text).split(". ") for text in content]))
#filter out digits and special characters
delete_table = string.maketrans(string.ascii_lowercase,
' ' * len(string.ascii_lowercase))
# remove common words and tokenize
stope = stopwords.words("english")
#stoplist can be extended like this:
# stope.extend(["worda","wordb",...])
with open("stopwords.csv") as stopcsv:
reader = csv.reader(stopcsv)
for row in reader:
stope.extend(row)
print "\nThis is a raw input document:"
print documents[0]
#texts are cleaned (characters only), filtered (stopwords removed) and stemmed (reduced to word stem)
texts = [[ES().stem(str(word.encode("utf8")).translate(None, delete_table))
for word in document.lower().split()
if str(word.encode("utf8")).translate(None, delete_table) not in stope]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens)
if all_tokens.count(word) == 1)
texts = [[word for word in text
if word not in tokens_once and len(word) > 1]
for text in texts]
print "\nThis is the raw document after cleaning, filtering, stemming and removal of unique words."
print texts[0]
#create dictionary and save for later use
dictionary = corpora.Dictionary(texts)
dictionary.save('{0}_{1}.dict'.format(keyword, depth))
#create corpus and save for later use
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('{0}_{1}_corpus.mm'.format(keyword, depth), corpus)
return dictionary, corpus
def preprocess_query(query):
"""
Performs preprocessing steps for a query string.
    Removes stopwords, filters for alphabetic characters only,
    and applies stemming.
"""
try:
if type(query[0]) is list:
query = [" ".join(text) for text in query]
except Exception, e:
pass
if type(query) is list:
query = " ".join(query)
#filter out digits and special characters
delete_table = string.maketrans(string.ascii_lowercase,
' ' * len(string.ascii_lowercase))
# remove common words and tokenize
stope = stopwords.words("english")
#stoplist can be extended like this:
with open("stopwords.csv") as stopcsv:
reader = csv.reader(stopcsv)
for row in reader:
stope.extend(row)
query = [ES().stem(str(word.encode("utf8")).translate(None, delete_table))
for word in query.lower().split()
if str(word.encode("utf8")).translate(None, delete_table) not in stope]
return query
def load_dictionary(keyword, depth):
"""
Load dictionary and corpus from disk.
"""
dictionary = corpora.Dictionary.load('{0}_{1}.dict'.format(keyword, depth))
corpus = corpora.MmCorpus('{0}_{1}_corpus.mm'.format(keyword, depth))
return dictionary, corpus
def create_lsi_model(dictionary, corpus, num_topics):
"""
Perform an analysis with an LSI-Model.
"""
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
return models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=num_topics)
def create_lda_model(dictionary, corpus, num_topics):
"""
Perform an analysis with an LDA-Model.
"""
return models.LdaModel(corpus, id2word=dictionary, num_topics=num_topics)
def load_reference_texts(model):
"""
Loads reference texts from disk.
    Reference texts should be placed in a folder in the script's
    directory and have to be direct output from MaxQDA.
"""
with open("testdata/{0}_codes.csv".format(model)) as codes:
reader = csv.reader(codes)
translation = {row[0]:int(row[1]) for row in reader}
xls = xlrd.open_workbook("testdata/testdata.xls")
codings = xls.sheet_by_name("Codings")
topics = [row.value for row in codings.col(2)][1:] # get topic column; skip column names
topics = [topic for topic in topics]
topics = [translation[topic] for topic in topics] # recode from name to number
texts = [row.value for row in codings.col(6)][1:] # get text column; skip column names
testdata = zip(topics, texts)
return testdata
def evaluate_model(keyword, testdata, modelname, model, num_words, threshold, depth):
"""
Testdata has to be a list of tuples with form
[(topicnr, 'reference text')]
"""
dictionary, corpus = load_dictionary(keyword, depth)
export_evaluation_header(keyword, depth)
evaluations = []
for ref, text in testdata:
query = preprocess_query(text)
query_bow = dictionary.doc2bow(query)
query_model = model[query_bow]
results = sorted(query_model, key=lambda item: -item[1])
if modelname is "lsa":
evaluation = (ref, results[1][0]+1)
if modelname is "lda":
evaluation = (ref, results[0][0]+1)
evaluations.append(evaluation)
# evaluation = (referencetopic, lsa-result)
for ref in set(d[0] for d in testdata):
true_positives, true_negatives = 0, 0
false_positives, false_negatives = 0, 0
for evaluation in evaluations:
# # # apply test magic here # # #
if evaluation[0] == ref:
if evaluation[1] == ref:
true_positives += 1
if evaluation[0] == ref:
if evaluation[1] != ref:
false_negatives += 1
if evaluation[0] != ref:
if evaluation[1] == ref:
false_positives += 1
            if evaluation[0] != ref:
                if evaluation[1] != ref:  # neither reference nor prediction match this topic
                    true_negatives += 1
# # # # # # # # # # # # # # # # #
test_pos = true_positives + false_positives
test_neg = false_negatives + true_negatives
cond_pos = true_positives + false_negatives
cond_neg = false_positives + true_negatives
total = len(evaluations)
        if cond_pos != 0:
            recall = float(true_positives) / float(cond_pos)
        else:
            recall = 0
        if cond_neg != 0:
            specificity = float(true_negatives) / float(cond_neg)
        else:
            specificity = 0
        if test_pos != 0:
            precision = float(true_positives) / float(test_pos)
        else:
            precision = 0
        if test_neg != 0:
            neg_pred_value = float(true_negatives) / float(test_neg)
        else:
            neg_pred_value = 0
        accuracy = float(true_positives + true_negatives) / float(total)
print "\nThe %s confusion table for %s on %s level and topic nr. %s is:" %(modelname, keyword, depth, str(ref))
print "TP: {0} FN: {1} \nFP: {2} TN: {3}".format(true_positives, false_negatives, false_positives, true_negatives)
print "Recall: %.4f" % recall
print "Specificity: %.4f" % specificity
print "Precision: %.4f" % precision
print "Neg. Predict. Value: %.4f" % neg_pred_value
print "Accuracy: %.4f \n" % accuracy
export_evaluation_results(keyword, depth, model, [str(ref),
str(total),
str(cond_pos),
str(true_positives),
str(false_negatives),
str(false_positives),
str(true_negatives),
str(recall),
str(specificity),
str(precision),
str(neg_pred_value),
str(accuracy)
])
def test_for_topic_convergence(keyword, testdata, modelname, model, show_topics, threshold, depth):
"""
This is experimental and not used in the analysis.
Especially for LDA it shows a visualization of topic-mapping.
"""
import matplotlib.pyplot as plt
import numpy as np
max_id = max(td[0] for td in testdata)
dictionary, corpus = load_dictionary(keyword, depth)
runs = 10
if modelname is "lsa":
convergence = np.zeros((max_id*runs+runs, model.projection.k+1))
if modelname is "lda":
convergence = np.zeros((max_id*runs+runs, show_topics+1))
i = 0
while i < runs:
evaluations = []
if modelname is "lda":
model = create_lda_model(dictionary, corpus, show_topics)
for ref, text in testdata:
query = preprocess_query(text)
query_bow = dictionary.doc2bow(query)
query_model = model[query_bow]
results = sorted(query_model, key=lambda item: -item[1])
if modelname is "lsa":
evaluation = (ref, results[1][0]+1)
if modelname is "lda":
evaluation = (ref, results[0][0]+1)
evaluations.append(evaluation)
# evaluation = (referencetopic, lsa-result)
conv = sorted(collections.Counter(evaluations).most_common(), key=lambda item: -item[0][0])
for con in conv:
if con[0][0] == ref:
convergence[(con[0][0]*runs)+i, con[0][1]] += con[1]
i += 1
row_max = np.amax(convergence+1, 1)
convergence = convergence / row_max[:,None]
plt.pcolor(np.log10((convergence+1)*10))
plt.show()
def export_evaluation_header(keyword, depth):
"""
auxiliary function
"""
with open("{0}_{1}_evaluation.csv".format(keyword, depth), "w") as evaluation:
evaluation.write("topicId,testdataSize,sampleSize,TP,FN,FP,TN,recall,Specificity,Precision,NegPredValue,Accuracy\n")
def export_evaluation_results(keyword, depth, model, *results):
"""
auxiliary function
"""
with open("{0}_{1}_evaluation.csv".format(keyword, depth), "a") as evaluation:
evaluation.write(",".join(*results)+"\n")
def export_word_graph(keyword, dictionary, modelname, model, num_topics, num_words, threshold, depth):
"""
Constructs a network of relations between words and topics.
This can be seen as a bipartite network, which is then transformed
into a unipartite network of word-word relations.
Of this network the giant component is taken and visualized.
"""
H = nx.Graph()
for word in dictionary.token2id.items():
H.add_node(word[1], text=word[0], partition=1)
n=0
for topic in model.show_topics(num_topics, num_words, formatted=False):
H.add_node(len(dictionary)+n+1, partition=0)
for word in range(num_words):
if topic[word][0] > threshold: #only positive weights
H.add_edge(len(dictionary)+n+1, dictionary.token2id[topic[word][1]])
n += 1
# construct bipartite graph with topics as 0 and words as 1
word_nodes, topic_nodes = nx.algorithms.bipartite.sets(H)
# create unipartite projection for words
W = nx.algorithms.bipartite.weighted_projected_graph(H, word_nodes)
# write to disk as GML
nx.write_gml(W, "{0}_{1}_{2}x{3}.gml".format(keyword+modelname, depth,
num_topics, num_words))
# read from disk as GML and create as igraph.Graph
G = ig.read("{0}_{1}_{2}x{3}.gml".format(keyword+modelname, depth,
num_topics, num_words), "gml")
# filter to giant component
gc = ig.VertexClustering(G).giant()
visual_style = {}
visual_style["layout"] = G.layout_fruchterman_reingold()
visual_style["vertex_size"] = 8
visual_style["vertex_label"] = G.vs["text"]
visual_style["edge_width"] = 0.5
visual_style["bbox"] = (1200, 1200)
visual_style["margin"] = 50
ig.plot(gc, "{0}_{1}_{2}x{3}_FR.svg".format(keyword+modelname, depth,
num_topics, num_words), **visual_style)
def export_topic_list(keyword, dictionary, modelname, model, num_topics, num_words, depth):
with open("{0}_{1}_{2}x{3}_topics.csv".format(keyword+modelname, depth,
num_topics, num_words), "w") as topics:
topics.write("Words,Weight\n")
with open("{0}_{1}_{2}x{3}_topics.csv".format(keyword+modelname, depth,
num_topics, num_words), "a") as topics:
n = 1
for t in model.show_topics(num_topics, num_words, formatted=False):
# item[0] are the correlations of the words in a topic
t = sorted(t, key=lambda item: -item[0])
topics.write("topic nr {0}, title\n".format(str(n)))
for word in range(num_words):
if t[word][0] > 0:
# word, weight
topics.write(str(t[word][1]) +"," + str(t[word][0]) + "\n")
n += 1
topics.write("\n")
def export_matrix(keyword, dictionary, modelname, model, show_topics, num_words, depth):
"""
Exports the results of the LSA into gephi-usable format.
The exported network is a bipartite one and needs to be transformed first,
before you can start with other graph algorithms.
Output: nodes.csv, edges.csv
"""
# write headers
with open("{0}_{1}_{2}x{3}_nodes.csv".format(keyword+modelname, depth,
show_topics, num_words), "w") as nodes:
nodes.write("Id,Label,Partition\n")
with open("{0}_{1}_{2}x{3}_edges.csv".format(keyword+modelname, depth,
show_topics, num_words), "w") as edges:
edges.write("Source,Target,Label,Weight\n")
with open("{0}_{1}_{2}x{3}_nodes.csv".format(keyword+modelname, depth,
show_topics, num_words), "a") as nodes:
for item in dictionary.token2id.items():
nodes.write(str(item[1]) + "," + str(item[0]) + "," + "Word" + "\n")
for i in range(show_topics):
nodes.write("{0},Topicnr {1},Topic\n".format(
str(len(dictionary) + i + 1),
str(i)))
with open("{0}_{1}_{2}x{3}_edges.csv".format(keyword+modelname, depth,
show_topics, num_words), "w") as edges:
n = 0
for t in model.show_topics(show_topics, num_words, formatted=False):
for word in range(num_words):
# topicnr, wordid, word, weight
edges.write(str(len(dictionary) + n + 1) +","
+ str(dictionary.token2id[t[word][1]]) + ","
+ str(t[word][1]) +","
+ str(t[word][0]) + "\n")
n += 1
def pypdfx(filename):
"""
Filename is a name of a pdf file WITHOUT the extension
The function will print messages, including the status code,
and will write the XML file to <filename>.xml
source: https://gist.github.com/yoavram/4351598
"""
url = "http://pdfx.cs.man.ac.uk"
fin = open(filename + '.pdf', 'rb')
files = {'file': fin}
try:
print 'Sending', filename, 'to', url
r = requests.post(url, files=files, headers={'Content-Type':'application/pdf'})
print 'Got status code', r.status_code
finally:
fin.close()
fout = open(filename.replace(" ","") + '.xml', 'w')
fout.write(r.content)
fout.close()
print 'Written to', filename.replace(" ","") + '.xml'
if __name__ == "__main__":
main() | {
"content_hash": "55f2c56cafa2a2de837907ae30c9a4d7",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 126,
"avg_line_length": 37.53061224489796,
"alnum_prop": 0.573533046616244,
"repo_name": "chreman/isis-praktikum",
"id": "404ae076e3689f94424fe3f491b66a2183f50fe6",
"size": "20232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sustainabilitylsa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20232"
}
],
"symlink_target": ""
} |
"""
Title: Image classification via fine-tuning with EfficientNet
Author: [Yixing Fu](https://github.com/yixingfu)
Date created: 2020/06/30
Last modified: 2020/07/16
Description: Use EfficientNet with weights pre-trained on imagenet for Stanford Dogs classification.
"""
"""
## Introduction: what is EfficientNet
EfficientNet, first introduced in [Tan and Le, 2019](https://arxiv.org/abs/1905.11946)
is among the most efficient models (i.e. requiring least FLOPS for inference)
that reaches State-of-the-Art accuracy on both
imagenet and common image classification transfer learning tasks.
The smallest base model is similar to [MnasNet](https://arxiv.org/abs/1807.11626), which
reached near-SOTA with a significantly smaller model. By introducing a heuristic way to
scale the model, EfficientNet provides a family of models (B0 to B7) that represents a
good combination of efficiency and accuracy on a variety of scales. Such a scaling
heuristics (compound-scaling, details see
[Tan and Le, 2019](https://arxiv.org/abs/1905.11946)) allows the
efficiency-oriented base model (B0) to surpass models at every scale, while avoiding
extensive grid-search of hyperparameters.
A summary of the latest updates on the model is available at
[here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet), where various
augmentation schemes and semi-supervised learning approaches are applied to further
improve the imagenet performance of the models. These extensions of the model can be used
by updating weights without changing model architecture.
## B0 to B7 variants of EfficientNet
*(This section provides some details on "compound scaling", and can be skipped
if you're only interested in using the models)*
Based on the [original paper](https://arxiv.org/abs/1905.11946) people may have the
impression that EfficientNet is a continuous family of models created by arbitrarily
choosing a scaling factor as in Eq.(3) of the paper. However, the choice of resolution,
depth and width is also restricted by many factors:
- Resolution: Resolutions not divisible by 8, 16, etc. cause zero-padding near boundaries
of some layers which wastes computational resources. This especially applies to smaller
variants of the model, hence the input resolution for B0 and B1 are chosen as 224 and
240.
- Depth and width: The building blocks of EfficientNet demands channel size to be
multiples of 8.
- Resource limit: Memory limitation may bottleneck resolution when depth
and width can still increase. In such a situation, increasing depth and/or
width while keeping resolution fixed can still improve performance.
As a result, the depth, width and resolution of each variant of the EfficientNet models
are hand-picked and proven to produce good results, though they may be significantly
off from the compound scaling formula.
Therefore, the Keras implementation (detailed below) only provides these 8 models, B0 to B7,
instead of allowing arbitrary choice of width / depth / resolution parameters.
## Keras implementation of EfficientNet
An implementation of EfficientNet B0 to B7 has been shipped with tf.keras since TF2.3. To
use EfficientNetB0 for classifying 1000 classes of images from imagenet, run:
```python
from tensorflow.keras.applications import EfficientNetB0
model = EfficientNetB0(weights='imagenet')
```
This model takes input images of shape (224, 224, 3), and the input data should be in the
range [0, 255]. Normalization is included as part of the model.
Training EfficientNet on ImageNet takes a tremendous amount of resources and relies on
several techniques that are not part of the model architecture itself. Hence the Keras
implementation by default loads pre-trained weights obtained via training with
[AutoAugment](https://arxiv.org/abs/1805.09501).
For B0 to B7 base models, the input shapes are different. Here is a list of input shape
expected for each model:
| Base model | resolution|
|----------------|-----|
| EfficientNetB0 | 224 |
| EfficientNetB1 | 240 |
| EfficientNetB2 | 260 |
| EfficientNetB3 | 300 |
| EfficientNetB4 | 380 |
| EfficientNetB5 | 456 |
| EfficientNetB6 | 528 |
| EfficientNetB7 | 600 |
When the model is intended for transfer learning, the Keras implementation
provides an option to remove the top layers:
```
model = EfficientNetB0(include_top=False, weights='imagenet')
```
This option excludes the final `Dense` layer that turns 1280 features on the penultimate
layer into prediction of the 1000 ImageNet classes. Replacing the top layer with custom
layers allows using EfficientNet as a feature extractor in a transfer learning workflow.
Another argument in the model constructor worth noticing is `drop_connect_rate` which controls
the dropout rate responsible for [stochastic depth](https://arxiv.org/abs/1603.09382).
This parameter serves as a toggle for extra regularization in finetuning, but does not
affect loaded weights. For example, when stronger regularization is desired, try:
```python
model = EfficientNetB0(weights='imagenet', drop_connect_rate=0.4)
```
The default value is 0.2.
## Example: EfficientNetB0 for Stanford Dogs.
EfficientNet is capable of a wide range of image classification tasks.
This makes it a good model for transfer learning.
As an end-to-end example, we will show using pre-trained EfficientNetB0 on
[Stanford Dogs](http://vision.stanford.edu/aditya86/ImageNetDogs/main.html) dataset.
"""
# IMG_SIZE is determined by EfficientNet model choice
IMG_SIZE = 224
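"""
As a small aside (not part of the original example): the per-variant input
resolutions from the table above can be kept in a simple lookup, so that
`IMG_SIZE` follows the chosen variant automatically.
"""

# Expected input resolutions for the B0-B7 variants (values taken from the table above).
EFFICIENTNET_IMG_SIZES = {
    "B0": 224, "B1": 240, "B2": 260, "B3": 300,
    "B4": 380, "B5": 456, "B6": 528, "B7": 600,
}
# e.g. IMG_SIZE = EFFICIENTNET_IMG_SIZES["B0"]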
"""
## Setup and data loading
This example requires TensorFlow 2.3 or above.
To use TPU, the TPU runtime must match current running TensorFlow
version. If there is a mismatch, try:
```python
from cloud_tpu_client import Client
c = Client()
c.configure_tpu_version(tf.__version__, restart_type="always")
```
"""
import tensorflow as tf
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
print("Device:", tpu.master())
strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
print("Not connected to a TPU runtime. Using CPU/GPU strategy")
strategy = tf.distribute.MirroredStrategy()
"""
### Loading data
Here we load data from [tensorflow_datasets](https://www.tensorflow.org/datasets)
(hereafter TFDS).
Stanford Dogs dataset is provided in
TFDS as [stanford_dogs](https://www.tensorflow.org/datasets/catalog/stanford_dogs).
It features 20,580 images that belong to 120 classes of dog breeds
(12,000 for training and 8,580 for testing).
By simply changing `dataset_name` below, you may also try this notebook for
other datasets in TFDS such as
[cifar10](https://www.tensorflow.org/datasets/catalog/cifar10),
[cifar100](https://www.tensorflow.org/datasets/catalog/cifar100),
[food101](https://www.tensorflow.org/datasets/catalog/food101),
etc. When the images are much smaller than the size of EfficientNet input,
we can simply upsample the input images. It has been shown in
[Tan and Le, 2019](https://arxiv.org/abs/1905.11946) that transfer learning
results are better with increased resolution even if the input images remain small.
For TPU: if using TFDS datasets,
a [GCS bucket](https://cloud.google.com/storage/docs/key-terms#buckets)
location is required to save the datasets. For example:
```python
tfds.load(dataset_name, data_dir="gs://example-bucket/datapath")
```
Also, both the current environment and the TPU service account must have
proper [access](https://cloud.google.com/tpu/docs/storage-buckets#authorize_the_service_account)
to the bucket. Alternatively, for small datasets you may try loading the data
into memory and using `tf.data.Dataset.from_tensor_slices()` (a short sketch follows below).
"""
import tensorflow_datasets as tfds
batch_size = 64
dataset_name = "stanford_dogs"
(ds_train, ds_test), ds_info = tfds.load(
dataset_name, split=["train", "test"], with_info=True, as_supervised=True
)
NUM_CLASSES = ds_info.features["label"].num_classes
"""
When the dataset includes images of various sizes, we need to resize them to a
shared size. The Stanford Dogs dataset includes only images that are at least 200x200
pixels in size. Here we resize the images to the input size needed for EfficientNet.
"""
size = (IMG_SIZE, IMG_SIZE)
ds_train = ds_train.map(lambda image, label: (tf.image.resize(image, size), label))
ds_test = ds_test.map(lambda image, label: (tf.image.resize(image, size), label))
"""
### Visualizing the data
The following code shows the first 9 images with their labels.
"""
import matplotlib.pyplot as plt
def format_label(label):
string_label = label_info.int2str(label)
return string_label.split("-")[1]
label_info = ds_info.features["label"]
for i, (image, label) in enumerate(ds_train.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
"""
### Data augmentation
We can use the preprocessing layers APIs for image augmentation.
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
img_augmentation = Sequential(
[
layers.RandomRotation(factor=0.15),
layers.RandomTranslation(height_factor=0.1, width_factor=0.1),
layers.RandomFlip(),
layers.RandomContrast(factor=0.1),
],
name="img_augmentation",
)
"""
This `Sequential` model object can be used both as a part of
the model we later build, and as a function to preprocess
data before feeding it into the model. Using it as a function makes
it easy to visualize the augmented images. Here we plot 9 examples
of the augmentation results for a given image.
"""
for image, label in ds_train.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
aug_img = img_augmentation(tf.expand_dims(image, axis=0))
plt.imshow(aug_img[0].numpy().astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
"""
### Prepare inputs
Once we verify the input data and augmentation are working correctly,
we prepare the dataset for training. The input data are resized to uniform
`IMG_SIZE`. The labels are put into one-hot
(a.k.a. categorical) encoding. The dataset is batched.
Note: `prefetch` and `AUTOTUNE` may in some situations improve
performance, but this depends on the environment and the specific dataset used.
See this [guide](https://www.tensorflow.org/guide/data_performance)
for more information on data pipeline performance.
"""
# One-hot / categorical encoding
def input_preprocess(image, label):
label = tf.one_hot(label, NUM_CLASSES)
return image, label
ds_train = ds_train.map(input_preprocess, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.batch(batch_size=batch_size, drop_remainder=True)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
ds_test = ds_test.map(input_preprocess)
ds_test = ds_test.batch(batch_size=batch_size, drop_remainder=True)
"""
## Training a model from scratch
We build an EfficientNetB0 with 120 output classes, initialized from scratch.
Note: the accuracy will increase very slowly and the model may overfit.
"""
from tensorflow.keras.applications import EfficientNetB0
with strategy.scope():
inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
x = img_augmentation(inputs)
outputs = EfficientNetB0(include_top=True, weights=None, classes=NUM_CLASSES)(x)
model = tf.keras.Model(inputs, outputs)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.summary()
epochs = 40 # @param {type: "slider", min:10, max:100}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=2)
"""
Training the model is relatively fast (it takes only 20 seconds per epoch on the TPUv2 that is
available on Colab). This might make it sound easy to simply train EfficientNet from scratch on
any dataset. However, training EfficientNet on smaller datasets,
especially those with lower resolution like CIFAR-100, faces the significant challenge of
overfitting.
Hence training from scratch requires a very careful choice of hyperparameters, and it is
difficult to find suitable regularization. It would also be much more demanding in resources.
Plotting the training and validation accuracy
makes it clear that validation accuracy stagnates at a low value.
"""
import matplotlib.pyplot as plt
def plot_hist(hist):
plt.plot(hist.history["accuracy"])
plt.plot(hist.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
plot_hist(hist)
"""
## Transfer learning from pre-trained weights
Here we initialize the model with pre-trained ImageNet weights,
and we fine-tune it on our own dataset.
"""
def build_model(num_classes):
inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
x = img_augmentation(inputs)
model = EfficientNetB0(include_top=False, input_tensor=x, weights="imagenet")
# Freeze the pretrained weights
model.trainable = False
# Rebuild top
x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
x = layers.BatchNormalization()(x)
top_dropout_rate = 0.2
x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
outputs = layers.Dense(NUM_CLASSES, activation="softmax", name="pred")(x)
# Compile
model = tf.keras.Model(inputs, outputs, name="EfficientNet")
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
"""
The first step to transfer learning is to freeze all layers and train only the top
layers. For this step, a relatively large learning rate (1e-2) can be used.
Note that validation accuracy and loss will usually be better than training
accuracy and loss. This is because the regularization is strong, which only
suppresses training-time metrics.
Note that the convergence may take up to 50 epochs depending on choice of learning rate.
If image augmentation layers were not
applied, the validation accuracy may only reach ~60%.
"""
with strategy.scope():
model = build_model(num_classes=NUM_CLASSES)
epochs = 25 # @param {type: "slider", min:8, max:80}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=2)
plot_hist(hist)
"""
The second step is to unfreeze a number of layers and fit the model using a smaller
learning rate. In this example we show unfreezing all layers, but depending on the
specific dataset it may be desirable to only unfreeze a fraction of all layers.
When feature extraction with the
pretrained model works well enough, this step gives only a very limited gain in
validation accuracy. In our case we only see a small improvement,
as ImageNet pretraining already exposed the model to a good amount of dogs.
On the other hand, when we use pretrained weights on a dataset that is more different
from ImageNet, this fine-tuning step can be crucial as the feature extractor also
needs to be adjusted by a considerable amount. Such a situation can be demonstrated
by choosing the CIFAR-100 dataset instead, where fine-tuning boosts validation accuracy
by about 10% to pass 80% on `EfficientNetB0`.
In such a case the convergence may take more than 50 epochs.
A side note on freezing/unfreezing models: setting `trainable` of a `Model` will
simultaneously set all layers belonging to the `Model` to the same `trainable`
attribute. Each layer is trainable only if both the layer itself and the model
containing it are trainable. Hence when we need to partially freeze/unfreeze
a model, we need to make sure the `trainable` attribute of the model is set
to `True`.
"""
def unfreeze_model(model):
# We unfreeze the top 20 layers while leaving BatchNorm layers frozen
for layer in model.layers[-20:]:
if not isinstance(layer, layers.BatchNormalization):
layer.trainable = True
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
unfreeze_model(model)
epochs = 10 # @param {type: "slider", min:8, max:50}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=2)
plot_hist(hist)
"""
### Tips for fine tuning EfficientNet
On unfreezing layers:
- The `BatchNormalization` layers need to be kept frozen
([more details](https://keras.io/guides/transfer_learning/)).
If they are also made trainable, the
first epoch after unfreezing will significantly reduce accuracy.
- In some cases it may be beneficial to open up only a portion of layers instead of
unfreezing all. This will make fine tuning much faster when going to larger models like
B7.
- Each block needs to be all turned on or off. This is because the architecture includes
a shortcut from the first layer to the last layer for each block. Not respecting blocks
also significantly harms the final performance (a minimal sketch of block-wise unfreezing
is given at the end of this example).
Some other tips for utilizing EfficientNet:
- Larger variants of EfficientNet do not guarantee improved performance, especially for
tasks with less data or fewer classes. In such a case, the larger the variant of EfficientNet
chosen, the harder it is to tune hyperparameters.
- EMA (Exponential Moving Average) is very helpful in training EfficientNet from scratch,
but not so much for transfer learning.
- Do not use the RMSprop setup as in the original paper for transfer learning. The
momentum and learning rate are too high for transfer learning. This will easily corrupt the
pretrained weights and blow up the loss. A quick check is to see if loss (as categorical
cross entropy) is getting significantly larger than log(NUM_CLASSES) after the same
epoch. If so, the initial learning rate/momentum is too high.
- A smaller batch size benefits validation accuracy, possibly by effectively providing
regularization.
## Using the latest EfficientNet weights
Since the initial paper, EfficientNet has been improved by various methods for data
preprocessing and for using unlabelled data to enhance learning results. These
improvements are relatively hard and computationally costly to reproduce, and require
extra code; but the weights are readily available in the form of TF checkpoint files. The
model architecture has not changed, so loading the improved checkpoints is possible.
To use a checkpoint provided at
[the official model repository](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet), first
download the checkpoint. As an example, here we download the noisy-student version of B1:
```
!wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet\
/noisystudent/noisy_student_efficientnet-b1.tar.gz
!tar -xf noisy_student_efficientnet-b1.tar.gz
```
Then use the script [efficientnet_weight_update_util.py](https://github.com/keras-team/keras/blob/master/keras/applications/efficientnet_weight_update_util.py) to convert the ckpt file to an h5 file.
```
!python efficientnet_weight_update_util.py --model b1 --notop --ckpt \
efficientnet-b1/model.ckpt --o efficientnetb1_notop.h5
```
When creating the model, use the following to load the new weights:
```python
model = EfficientNetB1(weights="efficientnetb1_notop.h5", include_top=False)
```
"""
| {
"content_hash": "f39bc282532323810cdf182427d1b528",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 192,
"avg_line_length": 38.44288577154309,
"alnum_prop": 0.7620288797372674,
"repo_name": "keras-team/keras-io",
"id": "4da25d54bd0a2321bb1935a1ebd535577afe1d39",
"size": "19183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vision/image_classification_efficientnet_fine_tuning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15929"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "21968"
},
{
"name": "Jupyter Notebook",
"bytes": "718942"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "680865"
}
],
"symlink_target": ""
} |
import os
MONGODB_SETTINGS = {
    'db': 'tushedb', # Name of the database; for other available settings refer to the Mongoengine documentation
}
APP_NAME = 'light-cms' # English Only
SECRET_KEY = 'secret' # Change this in production
SITE_NAME = '图社'
ADMIN_URL = '/admin/' # I forgot where I used it
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
UPLOAD_FOLDER = os.path.join(BASE_DIR, 'static/uploads')
UPLOAD_URL = '/static/uploads/'
# Wechat related
wc_appid = 'appid'
wc_secret = 'secret'
wc_id = 'wc_id' # Wechat public account id (the one you set, NOT the original id)
wc_token = 'wc_token' # Wechat public account token
# Duoshuo
duoshuo_short_name = 'xxxxx'
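# A minimal sketch of how this module might be consumed (an assumption -- the app code
# is not shown here): a Flask app would typically load it with
#
#     app = Flask(APP_NAME)
#     app.config.from_object('settings')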
| {
"content_hash": "3f5409138b28f7581da35bff65981dab",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 105,
"avg_line_length": 26.6,
"alnum_prop": 0.6902255639097744,
"repo_name": "nsdown/tushe",
"id": "73f3718b5dc1c38bddcbedc591d44abbff53d38e",
"size": "693",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5414"
},
{
"name": "HTML",
"bytes": "17186"
},
{
"name": "JavaScript",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "17989"
}
],
"symlink_target": ""
} |
import wx
import wx.html
from ..utils.generic_class import GenericClass
from ..utils.constants import control, dtype
from ..utils.validator import CharValidator
import os
import pkg_resources as p
class AnatomicalPreprocessing(wx.html.HtmlWindow):
def __init__(self, parent, counter = 0):
from urllib2 import urlopen
wx.html.HtmlWindow.__init__(self, parent, style= wx.html.HW_SCROLLBAR_AUTO)
self.SetStandardFonts()
self.counter = counter
self.LoadPage(p.resource_filename('CPAC', 'GUI/resources/html/anat.html'))
# try:
# code = urlopen("http://fcp-indi.github.io/docs/user/anat.html").code
# if (code / 100 < 4):
# self.LoadPage('http://fcp-indi.github.io/docs/user/anat.html')
# else:
# self.LoadFile('html/anat.html')
# except:
# self.LoadFile('html/anat.html')
def get_counter(self):
return self.counter
class Segmentation(wx.ScrolledWindow):
def __init__(self, parent, counter =0):
wx.ScrolledWindow.__init__(self, parent)
import os
self.counter = counter
fsl = os.environ.get('FSLDIR')
if not fsl:
fsl = "$FSLDIR"
self.page = GenericClass(self, "Automatic Tissue Segmentation ")
self.page.add(label="Run Tissue Segmentation ",
control=control.CHOICE_BOX,
name='runSegmentationPreprocessing',
type=dtype.LSTR,
comment="Automatically segment anatomical images into white matter, gray matter, and CSF based on prior probability maps.",
values=["On","Off","On/Off"],
wkf_switch = True)
self.page.add(label= "White Matter Probability Threshold ",
control=control.TEXT_BOX,
name='whiteMatterThreshold',
type=dtype.LNUM,
values= "0.96",
validator = CharValidator("no-alpha"),
comment="Only voxels with a White Matter probability greater than this value will be classified as White Matter.\n\nCan be a single value or a list of values separated by commas.")
self.page.add(label = "Gray Matter Probability Threshold ",
control =control.TEXT_BOX,
name = 'grayMatterThreshold',
type =dtype.LNUM,
values= "0.7",
validator = CharValidator("no-alpha"),
comment= "Only voxels with a Gray Matter probability greater than this value will be classified as Gray Matter.\n\nCan be a single value or a list of values separated by commas.")
self.page.add(label= "CSF Probability Threshold ",
control=control.TEXT_BOX,
name='cerebralSpinalFluidThreshold',
type=dtype.LNUM,
values = "0.96",
validator = CharValidator("no-alpha"),
comment="Only voxels with a CSF probability greater than this value will be classified as CSF.\n\nCan be a single value or a list of values separated by commas.")
self.page.add(label= "Priors Directory ",
control=control.DIR_COMBO_BOX,
name='prior_path',
type=dtype.STR,
values= os.path.join(fsl, 'data/standard/tissuepriors/$standardResolution'),
comment="Full path to a directory containing binarized prior probability maps.\n\nThese maps are included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n\nIt is not necessary to change this path unless you intend to use non-standard priors.")
self.page.add(label= "White Matter Prior Probability Map ",
control=control.COMBO_BOX,
name='PRIOR_WHITE',
type=dtype.STR,
values = '$prior_path/avg152T1_white_bin.nii.gz',
comment="Full path to a binarized White Matter prior probability map.\n\nIt is not necessary to change this path unless you intend to use non-standard priors.")
self.page.add(label= "Gray Matter Prior Probability Map ",
control=control.COMBO_BOX,
name='PRIOR_GRAY',
type=dtype.STR,
values = '$prior_path/avg152T1_gray_bin.nii.gz',
comment="Full path to a binarized Gray Matter prior probability map.\n\nIt is not necessary to change this path unless you intend to use non-standard priors.")
self.page.add(label= "CSF Prior Probability Map ",
control=control.COMBO_BOX,
name='PRIOR_CSF',
type=dtype.STR,
values = '$prior_path/avg152T1_csf_bin.nii.gz',
comment="Full path to a binarized CSF prior probability map.\n\nIt is not necessary to change this path unless you intend to use non-standard priors.")
self.page.set_sizer()
parent.get_page_list().append(self)
def get_counter(self):
return self.counter
class Registration(wx.ScrolledWindow):
def __init__(self, parent, counter = 0):
wx.ScrolledWindow.__init__(self, parent)
self.counter = counter
self.page = GenericClass(self, "Anatomical Registration")
fsl = os.environ.get('FSLDIR')
if not fsl:
fsl = "$FSLDIR"
self.page.add(label="Run Anatomical Registration ",
control=control.CHOICE_BOX,
name='runRegistrationPreprocessing',
type=dtype.LSTR,
comment="Register anatomical images to a template.",
values=["On","Off","On/Off"],
wkf_switch = True)
self.page.add(label="Anatomical Template Resolution ",
control=control.CHOICE_BOX,
name='standardResolutionAnat',
type=dtype.STR,
values = ["1mm", "2mm", "3mm"],
comment="The resolution to which anatomical images should be transformed during registration.\n\nThis is the resolution at which processed anatomical files will be output.")
self.page.add(label="Anatomical Template (Brain Only) ",
control=control.COMBO_BOX,
name='standardResolutionBrainAnat',
type=dtype.STR,
values = str(os.path.join(fsl, "data/standard/MNI152_T1_${standardResolutionAnat}_brain.nii.gz")),
comment="Template to be used during registration.\n\nIt is not necessary to change this path unless you intend to use a non-standard template.")
self.page.add(label="Anatomical Template (With Skull) ",
control=control.COMBO_BOX,
name='standardAnat',
type=dtype.STR,
values = str(os.path.join(fsl, "data/standard/MNI152_T1_${standardResolutionAnat}.nii.gz")),
comment="Template to be used during registration.\n\nIt is not necessary to change this path unless you intend to use a non-standard template.")
self.page.add(label="Anatomical to Template Registration Method ",
control=control.CHOICE_BOX,
name='regOption',
type=dtype.LSTR,
comment="Use either ANTS or FSL (FLIRT and FNIRT) as your anatomical registration method.",
values=["ANTS","FSL","ANTS & FSL"],
wkf_switch = True)
self.page.add(label="FSL FNIRT Configuration File (FSL only) ",
control=control.COMBO_BOX,
name='fnirtConfig',
type=dtype.STR,
values = str(os.path.join("T1_2_MNI152_2mm")),
comment="Configuration file to be used by FSL to set FNIRT parameters.\n\nIt is not necessary to change this path unless you intend to use custom FNIRT parameters or a non-standard template.")
self.page.set_sizer()
parent.get_page_list().append(self)
def get_counter(self):
return self.counter
| {
"content_hash": "30c57b12f71f5c09ed2406e040bbeaaf",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 308,
"avg_line_length": 49.03448275862069,
"alnum_prop": 0.569971870604782,
"repo_name": "sgiavasis/C-PAC",
"id": "cf850c588066eff34884be94b3daca0a002f5249",
"size": "8532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CPAC/GUI/interface/pages/anatomical.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16855"
},
{
"name": "JavaScript",
"bytes": "52711"
},
{
"name": "Perl",
"bytes": "480"
},
{
"name": "Python",
"bytes": "5521856"
},
{
"name": "Shell",
"bytes": "4507"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestIndexJavaIntegration(PantsRunIntegrationTest):
def test_index_simple_java_code(self):
# Very simple test that we can run the extractor and indexer on some
# fairly trivial code without crashing, and that we produce something.
args = ['kythe', 'examples/src/java/org/pantsbuild/example/hello::']
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(args, workdir)
self.assert_success(pants_run)
for tgt in ['examples.src.java.org.pantsbuild.example.hello.greet.greet',
'examples.src.java.org.pantsbuild.example.hello.main.main-bin',
'examples.src.java.org.pantsbuild.example.hello.simple.simple']:
kindex_glob = os.path.join(workdir,
'kythe/extract/current/{}/current/*.kindex'.format(tgt))
kindex_files = glob.glob(kindex_glob)
self.assertEquals(1, len(kindex_files))
kindex_file = kindex_files[0]
self.assertTrue(os.path.isfile(kindex_file))
self.assertGreater(os.path.getsize(kindex_file), 200) # Make sure it's not trivial.
entries_path = os.path.join(workdir,
'kythe/index/current/{}/current/index.entries'.format(tgt))
self.assertTrue(os.path.isfile(entries_path))
self.assertGreater(os.path.getsize(entries_path), 1000) # Make sure it's not trivial.
| {
"content_hash": "fe907926b8e8fdba090e5114641f3501",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 95,
"avg_line_length": 52.03125,
"alnum_prop": 0.6684684684684684,
"repo_name": "pombredanne/pants",
"id": "695888163897c0f595aa96043ddcdf2b39b23a9e",
"size": "1812",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "contrib/kythe/tests/python/pants_test/contrib/kythe/tasks/test_index_java_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "446241"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5091198"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
"""
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <[email protected]>
# Denis Engemann <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
epochs.average().plot()
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0  # Same frequency-smoothing as (1), 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the Gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using Morlet wavelets, which are sinusoidal waves
# with a Gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
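#
# (Added note, not in the original text: the call below uses
# ``output='avg_power'``, which averages over the epochs dimension, so the
# array returned here has shape ``(n_channels, n_freqs, n_times)`` and
# ``power[0]`` below selects the first channel.)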
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000, freqs, power[0],
cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
| {
"content_hash": "2d2f217f978f2a68a555f11ad116ab1d",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 42.56060606060606,
"alnum_prop": 0.6035362525216565,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "da22307274e2da9e1123689fd867a4efa07f001f",
"size": "8427",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "0.17/_downloads/7919fb36eddf047b9e007999813d42f4/plot_time_frequency_simulated.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
} |
import pytest
import responses
from flask import Flask
from urlobject import URLObject
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
from flask_dance.contrib.meetup import make_meetup_blueprint, meetup
@pytest.fixture
def make_app():
"A callable to create a Flask app with the Meetup provider"
def _make_app(*args, **kwargs):
app = Flask(__name__)
app.secret_key = "whatever"
blueprint = make_meetup_blueprint(*args, **kwargs)
app.register_blueprint(blueprint)
return app
return _make_app
def test_blueprint_factory():
meetup_bp = make_meetup_blueprint(key="foo", secret="bar")
assert isinstance(meetup_bp, OAuth2ConsumerBlueprint)
assert meetup_bp.session.scope == ["basic"]
assert meetup_bp.session.base_url == "https://api.meetup.com/2/"
assert meetup_bp.session.client_id == "foo"
assert meetup_bp.client_secret == "bar"
assert meetup_bp.authorization_url == "https://secure.meetup.com/oauth2/authorize"
assert meetup_bp.token_url == "https://secure.meetup.com/oauth2/access"
assert meetup_bp.token_url_params == {"include_client_id": True}
def test_load_from_config(make_app):
app = make_app()
app.config["MEETUP_OAUTH_CLIENT_ID"] = "foo"
app.config["MEETUP_OAUTH_CLIENT_SECRET"] = "bar"
resp = app.test_client().get("/meetup")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
def test_blueprint_factory_scope():
meetup_bp = make_meetup_blueprint(key="foo", secret="bar", scope="customscope")
assert meetup_bp.session.scope == "customscope"
@responses.activate
def test_context_local(make_app):
responses.add(responses.GET, "https://meetup.com")
# set up two apps with two different set of auth tokens
app1 = make_app(
"foo1",
"bar1",
redirect_to="url1",
storage=MemoryStorage({"access_token": "app1"}),
)
app2 = make_app(
"foo2",
"bar2",
redirect_to="url2",
storage=MemoryStorage({"access_token": "app2"}),
)
# outside of a request context, referencing functions on the `meetup` object
# will raise an exception
with pytest.raises(RuntimeError):
meetup.get("https://meetup.com")
# inside of a request context, `meetup` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
meetup.get("https://meetup.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
meetup.get("https://meetup.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
| {
"content_hash": "1a9514af56f68a61b3221ff2248ea6e0",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 86,
"avg_line_length": 33.14772727272727,
"alnum_prop": 0.6643812135755913,
"repo_name": "singingwolfboy/flask-dance",
"id": "6412a598299c94728d34925579e07dc7610639fb",
"size": "2917",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/contrib/test_meetup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "329946"
}
],
"symlink_target": ""
} |
"""Tests for the Whois config flow."""
from unittest.mock import AsyncMock, MagicMock
import pytest
from whois.exceptions import (
FailedParsingWhoisOutput,
UnknownDateFormat,
UnknownTld,
WhoisCommandFailed,
)
from homeassistant.components.whois.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from tests.common import MockConfigEntry
async def test_full_user_flow(
hass: HomeAssistant,
mock_setup_entry: AsyncMock,
mock_whois_config_flow: MagicMock,
) -> None:
"""Test the full user configuration flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result.get("type") == FlowResultType.FORM
assert result.get("step_id") == SOURCE_USER
assert "flow_id" in result
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_DOMAIN: "Example.com"},
)
assert result2.get("type") == FlowResultType.CREATE_ENTRY
assert result2.get("title") == "Example.com"
assert result2.get("data") == {CONF_DOMAIN: "example.com"}
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"throw,reason",
[
(UnknownTld, "unknown_tld"),
(FailedParsingWhoisOutput, "unexpected_response"),
(UnknownDateFormat, "unknown_date_format"),
(WhoisCommandFailed, "whois_command_failed"),
],
)
async def test_full_flow_with_error(
hass: HomeAssistant,
mock_setup_entry: AsyncMock,
mock_whois_config_flow: MagicMock,
throw: Exception,
reason: str,
) -> None:
"""Test the full user configuration flow with an error.
    This tests a full config flow with an error happening, allowing
    the user to fix the error and try again.
"""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result.get("type") == FlowResultType.FORM
assert result.get("step_id") == SOURCE_USER
assert "flow_id" in result
mock_whois_config_flow.side_effect = throw
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_DOMAIN: "Example.com"},
)
assert result2.get("type") == FlowResultType.FORM
assert result2.get("step_id") == SOURCE_USER
assert result2.get("errors") == {"base": reason}
assert "flow_id" in result2
assert len(mock_setup_entry.mock_calls) == 0
assert len(mock_whois_config_flow.mock_calls) == 1
mock_whois_config_flow.side_effect = None
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
user_input={CONF_DOMAIN: "Example.com"},
)
assert result3.get("type") == FlowResultType.CREATE_ENTRY
assert result3.get("title") == "Example.com"
assert result3.get("data") == {CONF_DOMAIN: "example.com"}
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_whois_config_flow.mock_calls) == 2
async def test_already_configured(
hass: HomeAssistant,
mock_setup_entry: AsyncMock,
mock_config_entry: MockConfigEntry,
mock_whois_config_flow: MagicMock,
) -> None:
"""Test we abort if already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_DOMAIN: "HOME-Assistant.io"},
)
assert result.get("type") == FlowResultType.ABORT
assert result.get("reason") == "already_configured"
assert len(mock_setup_entry.mock_calls) == 0
| {
"content_hash": "0788a09a1e4310c8489ffe8ce11344a4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 74,
"avg_line_length": 30.89344262295082,
"alnum_prop": 0.677368002122579,
"repo_name": "mezz64/home-assistant",
"id": "7250a9d15672f0b4a69664c39dd0623e6faae092",
"size": "3769",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/whois/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from django.core import urlresolvers
from django.conf.urls import include, url
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailcore import hooks
from wagtail.wagtailsearch.urls import admin as admin_urls
from wagtail.wagtailadmin.menu import MenuItem
@hooks.register('register_admin_urls')
def register_admin_urls():
return [
url(r'^search/', include(admin_urls)),
]
@hooks.register('construct_main_menu')
def construct_main_menu(request, menu_items):
# TEMPORARY: Only show if the user is a superuser
if request.user.is_superuser:
menu_items.append(
MenuItem(_('Editors picks'), urlresolvers.reverse('wagtailsearch_editorspicks_index'), classnames='icon icon-pick', order=900)
)
| {
"content_hash": "72247cea92dfcf3c0f7d1fbc07ad5661",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 138,
"avg_line_length": 31.916666666666668,
"alnum_prop": 0.7310704960835509,
"repo_name": "100Shapes/wagtail",
"id": "d3d533036160a50d186b3e1c595c6af85f57c0b1",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailsearch/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "134161"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "JavaScript",
"bytes": "50022"
},
{
"name": "Python",
"bytes": "907538"
},
{
"name": "Shell",
"bytes": "8459"
}
],
"symlink_target": ""
} |
"""This module contains the clparserutil's unit tests."""
import logging
import optparse
import unittest
from .. import clparserutil
class TestCase(unittest.TestCase):
def test_check_logginglevel(self):
option = clparserutil.Option(
"--create",
action="store",
dest="logging_level",
default=logging.FATAL,
type="logginglevel",
help="whatever")
values = [
["debug", logging.DEBUG],
["info", logging.INFO],
["INFO", logging.INFO],
["warning", logging.WARNING],
["eRRor", logging.ERROR],
["CRITICAL", logging.CRITICAL],
["FATAL", logging.FATAL],
["dave", None],
["None", None],
["", None],
]
type_checker = clparserutil.Option.TYPE_CHECKER["logginglevel"]
self.assertIsNotNone(type_checker)
opt_string = option.get_opt_string(),
for value in values:
if value[1] is not None:
msg = "Failed to parse '%s' correctly." % value[0]
result = type_checker(option, opt_string, value[0])
self.assertEqual(result, value[1], msg)
else:
with self.assertRaises(optparse.OptionValueError):
type_checker(option, opt_string, value[0])
def test_check_user_colon_password(self):
option = clparserutil.Option(
"--create",
action="store",
dest="server",
default="",
type="usercolonpassword",
help="whatever")
values = [
["dave:simons", ("dave", "simons")],
["dave", None],
["dave:", None],
[":simons", None],
[":", None],
["", None],
]
type_checker = clparserutil.Option.TYPE_CHECKER["usercolonpassword"]
self.assertIsNotNone(type_checker)
opt_string = option.get_opt_string(),
for value in values:
if value[1] is not None:
msg = "Failed to parse '%s' correctly." % value[0]
result = type_checker(option, opt_string, value[0])
self.assertEqual(result, value[1], msg)
else:
with self.assertRaises(optparse.OptionValueError):
type_checker(option, opt_string, value[0])
def test_check_scheme_host_port(self):
option = clparserutil.Option(
"--create",
action="store",
dest="server",
default="bindle:8909",
type="schemehostport",
help="whatever")
values = [
["http://bindle:8909", "http://bindle:8909"],
["https://bindle:8909", "https://bindle:8909"],
["http://bindle", "http://bindle"],
["https://bindle", "https://bindle"],
["dave", None],
["http://bindle:", None],
["https://bindle:", None],
]
type_checker = clparserutil.Option.TYPE_CHECKER["schemehostport"]
self.assertIsNotNone(type_checker)
opt_string = option.get_opt_string(),
for value in values:
if value[1] is not None:
msg = "Failed to parse '%s' correctly." % value[0]
result = type_checker(option, opt_string, value[0])
self.assertEqual(result, value[1], msg)
else:
with self.assertRaises(optparse.OptionValueError):
type_checker(option, opt_string, value[0])
def test_check_boolean(self):
option = clparserutil.Option(
"--create",
action="store",
dest="create",
default=True,
type="boolean",
help="create key store - default = True")
values = [
["true", True],
["True", True],
["trUe", True],
["t", True],
["T", True],
["1", True],
["y", True],
["yes", True],
["y", True],
["false", False],
["False", False],
["FaLse", False],
["f", False],
["F", False],
["0", False],
["f", False],
["no", False],
["n", False],
["dave", None],
["None", None],
["", None],
]
type_checker = clparserutil.Option.TYPE_CHECKER["boolean"]
opt_string = option.get_opt_string(),
for value in values:
if value[1] is not None:
msg = "Failed to parse '%s' correctly." % value[0]
result = type_checker(option, opt_string, value[0])
self.assertEqual(result, value[1], msg)
else:
with self.assertRaises(optparse.OptionValueError):
type_checker(option, opt_string, value[0])
| {
"content_hash": "6f6f3d57137b106e752b7a57ad71023c",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 76,
"avg_line_length": 34.06849315068493,
"alnum_prop": 0.48391636509851227,
"repo_name": "simonsdave/tor-async-couchdb",
"id": "85e9ea308074564930af6fce437a223cbd7b0ca0",
"size": "4974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tor_async_couchdb/tests/clparserutil_unit_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138307"
},
{
"name": "Shell",
"bytes": "2032"
}
],
"symlink_target": ""
} |
from sympy import Rational as frac
from ..helpers import article, expand_symmetries
from ._helpers import CnScheme
_source = article(
authors=["G.M. Ewing"],
title="On Approximate Cubature",
journal="The American Mathematical Monthly",
volume="48",
number="2",
month="feb",
year="1941",
pages="134-136",
url="https://doi.org/10.2307/2303604",
)
def ewing(n):
d = {"0": [[frac(2, 3)]], "a": [[frac(1, 3 * 2 ** n)], [1]]}
points, weights = expand_symmetries(d, n)
return CnScheme("Ewing", n, weights, points, 3, _source)
| {
"content_hash": "49ff5f90c69646edc956a5d9e073cbac",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 25.954545454545453,
"alnum_prop": 0.6147110332749562,
"repo_name": "nschloe/quadpy",
"id": "d5e107b09c9c45ebd25b6d27196c08fbe2fb64b1",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/quadpy/cn/_ewing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "897850"
}
],
"symlink_target": ""
} |
from toolset.test_types.verifications import basic_body_verification, verify_headers
from toolset.test_types.abstract_test_type import AbstractTestType
class TestType(AbstractTestType):
def __init__(self, config):
self.plaintext_url = ""
kwargs = {
'name': 'plaintext',
'requires_db': False,
'accept_header': self.accept('plaintext'),
'args': ['plaintext_url']
}
AbstractTestType.__init__(self, config, **kwargs)
def verify(self, base_url):
url = base_url + self.plaintext_url
headers, body = self.request_headers_and_body(url)
_, problems = basic_body_verification(body, url, is_json_check=False)
# plaintext_url should be at least "/plaintext"
if len(self.plaintext_url) < 10:
problems.append(
("fail",
"Route for plaintext must be at least 10 characters, found '{}' instead".format(self.plaintext_url),
url))
if len(problems) > 0:
return problems
# Case insensitive
body = body.lower()
expected = "hello, world!"
extra_bytes = len(body) - len(expected)
if expected not in body:
return [('fail', "Could not find 'Hello, World!' in response.",
url)]
if extra_bytes > 0:
problems.append(
('warn',
("Server is returning %s more bytes than are required. "
"This may negatively affect benchmark performance." %
extra_bytes), url))
problems += verify_headers(self.request_headers_and_body, headers, url, should_be='plaintext')
if len(problems) == 0:
return [('pass', '', url)]
else:
return problems
def get_url(self):
return self.plaintext_url
def get_script_name(self):
return 'pipeline.sh'
def get_script_variables(self, name, url):
return {
'max_concurrency':
max(self.config.concurrency_levels),
'name':
name,
'duration':
self.config.duration,
'levels':
" ".join("{}".format(item)
for item in self.config.pipeline_concurrency_levels),
'server_host':
self.config.server_host,
'url':
url,
'pipeline':
16,
'accept':
"text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
}
| {
"content_hash": "6dbde592bc0eecd0698f7d4c54fb2dcc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 117,
"avg_line_length": 32.8875,
"alnum_prop": 0.5248954770049411,
"repo_name": "greenlaw110/FrameworkBenchmarks",
"id": "1ca4bee1236c5fcb1c83bf165ece2174b9cf3958",
"size": "2631",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "toolset/test_types/plaintext/plaintext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "104"
},
{
"name": "Batchfile",
"bytes": "1125"
},
{
"name": "C",
"bytes": "176072"
},
{
"name": "C#",
"bytes": "453771"
},
{
"name": "C++",
"bytes": "170038"
},
{
"name": "CMake",
"bytes": "6315"
},
{
"name": "CSS",
"bytes": "2035"
},
{
"name": "Clojure",
"bytes": "80807"
},
{
"name": "Common Lisp",
"bytes": "22084"
},
{
"name": "Crystal",
"bytes": "27193"
},
{
"name": "D",
"bytes": "203825"
},
{
"name": "Dart",
"bytes": "52130"
},
{
"name": "Dockerfile",
"bytes": "327921"
},
{
"name": "Dylan",
"bytes": "868"
},
{
"name": "Elixir",
"bytes": "14368"
},
{
"name": "Erlang",
"bytes": "41222"
},
{
"name": "F#",
"bytes": "89739"
},
{
"name": "Go",
"bytes": "163503"
},
{
"name": "Groovy",
"bytes": "21834"
},
{
"name": "HTML",
"bytes": "141462"
},
{
"name": "Hack",
"bytes": "2261"
},
{
"name": "Haskell",
"bytes": "70225"
},
{
"name": "Java",
"bytes": "679190"
},
{
"name": "JavaScript",
"bytes": "174521"
},
{
"name": "Kotlin",
"bytes": "57654"
},
{
"name": "Lua",
"bytes": "14508"
},
{
"name": "Makefile",
"bytes": "4321"
},
{
"name": "Meson",
"bytes": "846"
},
{
"name": "MoonScript",
"bytes": "2396"
},
{
"name": "Nim",
"bytes": "1288"
},
{
"name": "PHP",
"bytes": "504030"
},
{
"name": "PLpgSQL",
"bytes": "3446"
},
{
"name": "Perl",
"bytes": "15376"
},
{
"name": "Python",
"bytes": "332042"
},
{
"name": "QMake",
"bytes": "2301"
},
{
"name": "Racket",
"bytes": "5069"
},
{
"name": "Ruby",
"bytes": "88707"
},
{
"name": "Rust",
"bytes": "81497"
},
{
"name": "Scala",
"bytes": "101711"
},
{
"name": "Shell",
"bytes": "96313"
},
{
"name": "Smarty",
"bytes": "436"
},
{
"name": "Swift",
"bytes": "101361"
},
{
"name": "TypeScript",
"bytes": "14303"
},
{
"name": "UrWeb",
"bytes": "4453"
},
{
"name": "Vala",
"bytes": "1579"
},
{
"name": "Visual Basic",
"bytes": "27087"
},
{
"name": "Volt",
"bytes": "511"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_utensils_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","utensils")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "a06c4974f0509d709a42064cd1eb803e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.7121212121212122,
"repo_name": "anhstudios/swganh",
"id": "a5340af8dd42a65f660ee88de24452a062abb145",
"size": "475",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/loot/loot_schematic/shared_utensils_schematic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import numpy as np
from collections import Counter
from MachineLearning.Distances import DistancesMatrix
class NearestNeighbor:
def __init__(self,k):
self.k=k
def train(self,X,y):
        # X is an (Nlearn x p) matrix; y is a 1d vector of length Nlearn
self.X=X
self.y=y
self.Nl=X.shape[0]
def query(self,Xt,typeD='Euc'):
Ntest=Xt.shape[0]
ypred = np.zeros(Ntest)
tD=typeD
Dist=DistancesMatrix(self.X,Xt,self.Nl,Ntest,typeD=tD,T=False)
if self.k != 1:
ind=np.argsort(Dist,axis=1)
ypred = [Counter(self.y[ind[rt,0:self.k]]).most_common()[0][0] for rt in range(Ntest)]
#for rt in range(Ntest):
# ypred[rt]=Counter(self.y[ind[rt,0:self.k]]).most_common()[0][0]
else:
ind=np.argmin(Dist,axis=1)
ypred=self.y[ind]
return ypred
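if __name__ == "__main__":
    # Usage sketch added for illustration only (synthetic data; assumes the
    # 'Euc' metric handled by DistancesMatrix, as used in query() above).
    Xl = np.random.randn(100, 5)
    yl = np.random.randint(0, 3, 100)
    knn = NearestNeighbor(3)
    knn.train(Xl, yl)
    print(knn.query(np.random.randn(10, 5), typeD='Euc'))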
| {
"content_hash": "c5d778c3eb0f1e1e92e31b8c248285ec",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 89,
"avg_line_length": 23.625,
"alnum_prop": 0.6785714285714286,
"repo_name": "L-F-A/Machine-Learning",
"id": "8ba4610c1a2a08685e148f1db05d85892965f264",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NNeighbor/NN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134484"
}
],
"symlink_target": ""
} |
"""
Reactor that uses IO completion ports
"""
from twisted.internet import base, interfaces, main, error
from twisted.python import log, failure
from twisted.internet._dumbwin32proc import Process
from zope.interface import implements
import socket, sys
from twisted.internet.iocpreactor import iocpsupport as _iocp
from twisted.internet.iocpreactor.const import WAIT_TIMEOUT
from twisted.internet.iocpreactor import tcp, udp
from twisted.python.compat import set
MAX_TIMEOUT = 2000 # 2 seconds, see doIteration for explanation
EVENTS_PER_LOOP = 1000 # XXX: what's a good value here?
# keys to associate with normal and waker events
KEY_NORMAL, KEY_WAKEUP = range(2)
_NO_GETHANDLE = error.ConnectionFdescWentAway(
'Handler has no getFileHandle method')
_NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away')
class IOCPReactor(base._SignalReactorMixin, base.ReactorBase):
implements(interfaces.IReactorTCP, interfaces.IReactorUDP,
interfaces.IReactorMulticast, interfaces.IReactorProcess)
port = None
def __init__(self):
base.ReactorBase.__init__(self)
self.port = _iocp.CompletionPort()
self.handles = set()
def addActiveHandle(self, handle):
self.handles.add(handle)
def removeActiveHandle(self, handle):
self.handles.discard(handle)
def doIteration(self, timeout):
# This function sits and waits for an IO completion event.
#
# There are two requirements: process IO events as soon as they arrive
# and process ctrl-break from the user in a reasonable amount of time.
#
# There are three kinds of waiting.
# 1) GetQueuedCompletionStatus (self.port.getEvent) to wait for IO
# events only.
# 2) Msg* family of wait functions that can stop waiting when
# ctrl-break is detected (then, I think, Python converts it into a
# KeyboardInterrupt)
# 3) *Ex family of wait functions that put the thread into an
# "alertable" wait state which is supposedly triggered by IO completion
#
# 2) and 3) can be combined. Trouble is, my IO completion is not
# causing 3) to trigger, possibly because I do not use an IO completion
# callback. Windows is weird.
# There are two ways to handle this. I could use MsgWaitForSingleObject
# here and GetQueuedCompletionStatus in a thread. Or I could poll with
# a reasonable interval. Guess what! Threads are hard.
processed_events = 0
if timeout is None:
timeout = MAX_TIMEOUT
else:
timeout = min(MAX_TIMEOUT, int(1000*timeout))
rc, bytes, key, evt = self.port.getEvent(timeout)
while processed_events < EVENTS_PER_LOOP:
if rc == WAIT_TIMEOUT:
break
if key != KEY_WAKEUP:
assert key == KEY_NORMAL
if not evt.ignore:
log.callWithLogger(evt.owner, self._callEventCallback,
rc, bytes, evt)
processed_events += 1
rc, bytes, key, evt = self.port.getEvent(0)
def _callEventCallback(self, rc, bytes, evt):
owner = evt.owner
why = None
try:
evt.callback(rc, bytes, evt)
handfn = getattr(owner, 'getFileHandle', None)
if not handfn:
why = _NO_GETHANDLE
elif handfn() == -1:
why = _NO_FILEDESC
if why:
return # ignore handles that were closed
except:
why = sys.exc_info()[1]
log.err()
if why:
owner.loseConnection(failure.Failure(why))
def installWaker(self):
pass
def wakeUp(self):
self.port.postEvent(0, KEY_WAKEUP, None)
def registerHandle(self, handle):
self.port.addHandle(handle, KEY_NORMAL)
def createSocket(self, af, stype):
skt = socket.socket(af, stype)
self.registerHandle(skt.fileno())
return skt
def listenTCP(self, port, factory, backlog=50, interface=''):
"""
@see: twisted.internet.interfaces.IReactorTCP.listenTCP
"""
p = tcp.Port(port, factory, backlog, interface, self)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""
@see: twisted.internet.interfaces.IReactorTCP.connectTCP
"""
c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
c.connect()
return c
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
"""
Connects a given L{DatagramProtocol} to the given numeric UDP port.
@returns: object conforming to L{IListeningPort}.
"""
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
return p
def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192,
listenMultiple=False):
"""
Connects a given DatagramProtocol to the given numeric UDP port.
EXPERIMENTAL.
@returns: object conforming to IListeningPort.
"""
p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self,
listenMultiple)
p.startListening()
return p
def spawnProcess(self, processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn a process.
"""
if uid is not None:
raise ValueError("Setting UID is unsupported on this platform.")
if gid is not None:
raise ValueError("Setting GID is unsupported on this platform.")
if usePTY:
raise ValueError("PTYs are unsupported on this platform.")
if childFDs is not None:
raise ValueError(
"Custom child file descriptor mappings are unsupported on "
"this platform.")
args, env = self._checkProcessArgs(args, env)
return Process(self, processProtocol, executable, args, env, path)
def removeAll(self):
res = list(self.handles)
self.handles.clear()
return res
def install():
r = IOCPReactor()
main.installReactor(r)
__all__ = ['IOCPReactor', 'install']
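# Usage sketch (added for illustration): ``install()`` must be called before
# anything else imports ``twisted.internet.reactor``, e.g.::
#
#   from twisted.internet.iocpreactor.reactor import install
#   install()
#   from twisted.internet import reactor
#   reactor.run()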
| {
"content_hash": "5976f8fe4be976dbdbfc3a42ca330c90",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 31.68780487804878,
"alnum_prop": 0.6142241379310345,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "08c4a36b3d2da24e6d5c9b74fba7f48402d0f4d4",
"size": "6634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/internet/iocpreactor/reactor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
} |
"""Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from traitlets.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from warnings import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument('-l', '--list', action='store_true',
help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
This function lets you activate matplotlib interactive support
at any point during an IPython session. It does not import anything
into the interactive namespace.
If you are using the inline matplotlib backend in the IPython Notebook
you can set which figure formats are enabled using the following::
In [1]: from IPython.display import set_matplotlib_formats
In [2]: set_matplotlib_formats('pdf', 'svg')
The default for inline figures sets `bbox_inches` to 'tight'. This can
cause discrepancies between the displayed image and the identical
image created using `savefig`. This behavior can be disabled using the
`%config` magic::
In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
In addition, see the docstring of
`IPython.display.set_matplotlib_formats` and
`IPython.display.set_matplotlib_close` for more information on
changing additional behaviors of the inline backend.
Examples
--------
To enable the inline backend for usage with the IPython Notebook::
In [1]: %matplotlib inline
In this case, where the matplotlib default is TkAgg::
In [2]: %matplotlib
Using matplotlib backend: TkAgg
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
You can list the available backends using the -l/--list option::
In [4]: %matplotlib --list
Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
if args.list:
backends_list = list(backends.keys())
print("Available matplotlib backends: %s" % backends_list)
else:
gui, backend = self.shell.enable_matplotlib(args.gui)
self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--no-import-all', action='store_true', default=None,
help="""Prevent IPython from performing ``import *`` into the interactive namespace.
You can govern the default behavior of this flag with the
InteractiveShellApp.pylab_import_all configurable.
"""
)
@magic_gui_arg
def pylab(self, line=''):
"""Load numpy and matplotlib to work interactively.
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
%pylab makes the following imports::
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
If you pass `--no-import-all`, the last two `*` imports will be excluded.
See the %matplotlib magic for more details about activating matplotlib
without affecting the interactive namespace.
"""
args = magic_arguments.parse_argstring(self.pylab, line)
if args.no_import_all is None:
# get default from Application
if Application.initialized():
app = Application.instance()
try:
import_all = app.pylab_import_all
except AttributeError:
import_all = True
else:
# nothing specified, no app - default True
import_all = True
else:
# invert no-import flag
import_all = not args.no_import_all
gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
self._show_matplotlib_backend(args.gui, backend)
print ("Populating the interactive namespace from numpy and matplotlib")
if clobbered:
warn("pylab import has clobbered these variables: %s" % clobbered +
"\n`%matplotlib` prevents importing * from pylab and numpy"
)
def _show_matplotlib_backend(self, gui, backend):
"""show matplotlib message backend message"""
if not gui or gui == 'auto':
print("Using matplotlib backend: %s" % backend)
| {
"content_hash": "5df1a05545169f9736a31ef65e2fb0d3",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 104,
"avg_line_length": 39.25903614457831,
"alnum_prop": 0.5771060303820776,
"repo_name": "unnikrishnankgs/va",
"id": "f3c70a34072e1eeea34deb88697de8d9f683cfe8",
"size": "6517",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/IPython/core/magics/pylab.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1836035"
},
{
"name": "C++",
"bytes": "12002305"
},
{
"name": "CMake",
"bytes": "128"
},
{
"name": "CSS",
"bytes": "64776"
},
{
"name": "Cuda",
"bytes": "78890"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "297329"
},
{
"name": "JavaScript",
"bytes": "4313047"
},
{
"name": "Jupyter Notebook",
"bytes": "603900"
},
{
"name": "Makefile",
"bytes": "7573"
},
{
"name": "Nginx",
"bytes": "544"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "PureBasic",
"bytes": "134"
},
{
"name": "Python",
"bytes": "51104955"
},
{
"name": "Shell",
"bytes": "71646"
},
{
"name": "Smarty",
"bytes": "28890"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import gammaln
from autograd import grad
import scipy.optimize
# The code in this example implements a method for finding a stationary point of
# the negative binomial likelihood via Newton's method, described here:
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
def newton(f, x0):
# wrap scipy.optimize.newton with our automatic derivatives
return scipy.optimize.newton(f, x0, fprime=grad(f), fprime2=grad(grad(f)))
def negbin_loglike(r, p, x):
# the negative binomial log likelihood we want to maximize
return gammaln(r+x) - gammaln(r) - gammaln(x+1) + x*np.log(p) + r*np.log(1-p)
def negbin_sample(r, p, size):
# a negative binomial is a gamma-compound-Poisson
return npr.poisson(npr.gamma(r, p/(1-p), size=size))
def fit_maxlike(x, r_guess):
# follows Wikipedia's section on negative binomial max likelihood
assert np.var(x) > np.mean(x), "Likelihood-maximizing parameters don't exist!"
loglike = lambda r, p: np.sum(negbin_loglike(r, p, x))
p = lambda r: np.sum(x) / np.sum(r+x)
rprime = lambda r: grad(loglike)(r, p(r))
r = newton(rprime, r_guess)
return r, p(r)
if __name__ == "__main__":
# generate data
npr.seed(0)
data = negbin_sample(r=5, p=0.5, size=1000)
# fit likelihood-extremizing parameters
r, p = fit_maxlike(data, r_guess=1)
# report fit
print('Fit parameters:')
print('r={r}, p={p}'.format(r=r, p=p))
print('Check that we are at a local stationary point:')
loglike = lambda r, p: np.sum(negbin_loglike(r, p, data))
grad_both = grad(loglike, argnum=(0, 1))
print(grad_both(r, p))
import matplotlib.pyplot as plt
xm = data.max()
plt.figure()
plt.hist(data, bins=np.arange(xm+1)-0.5, normed=True, label='normed data counts')
plt.xlim(0,xm)
plt.plot(np.arange(xm), np.exp(negbin_loglike(r, p, np.arange(xm))), label='maxlike fit')
plt.xlabel('k')
plt.ylabel('p(k)')
plt.legend(loc='best')
plt.show()
| {
"content_hash": "26fb13583e0243a9d10db321db7db9cb",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 93,
"avg_line_length": 32.71212121212121,
"alnum_prop": 0.6702176933765632,
"repo_name": "HIPS/autograd",
"id": "47beaa5f7c861f9f535a0a96572798cb4694902f",
"size": "2159",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/negative_binomial_maxlike.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "275938"
},
{
"name": "Shell",
"bytes": "950"
}
],
"symlink_target": ""
} |
'''
Created on 2013-5-8
@author: lan (www.9miao.com)
'''
from DBUtils.PooledDB import PooledDB
import MySQLdb
DBCS = {'mysql':MySQLdb,}
class DBPool(object):
    '''Thin wrapper around a DBUtils PooledDB connection pool.
    '''
def initPool(self,**kw):
        '''Create the pool; keyword arguments are passed through to the
        DB-API driver (MySQLdb by default, selected via the ``engine`` key).
        '''
self.config = kw
creator = DBCS.get(kw.get('engine','mysql'),MySQLdb)
self.pool = PooledDB(creator,5,**kw)
def connection(self):
return self.pool.connection()
dbpool = DBPool()
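# Usage sketch (added for illustration; the connection parameters below are
# hypothetical and are passed straight through to the MySQLdb driver by
# PooledDB)::
#
#   dbpool.initPool(host='127.0.0.1', port=3306, user='root',
#                   passwd='secret', db='game', charset='utf8')
#   conn = dbpool.connection()
#   cur = conn.cursor()
#   cur.execute('SELECT 1')
#   cur.close()
#   conn.close()  # returns the connection to the pool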
| {
"content_hash": "e6fd83c98e5d2a4abaa441f655c58140",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 60,
"avg_line_length": 18.16,
"alnum_prop": 0.5704845814977973,
"repo_name": "fuhongxue/Firefly",
"id": "97307d3dbba94e91f3dfbc998842fc3afe262b17",
"size": "467",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "firefly/dbentrust/dbpool.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
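# Added illustration: with settings NAME='mydb', USER='me',
# HOST='db.example.com' and PORT='3306' (hypothetical values, no password),
# runshell() builds and execs roughly:
#
#   mysql --user=me --host=db.example.com --port=3306 mydb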
| {
"content_hash": "480d3f74593c2551f2743937dd3c03c8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 82,
"avg_line_length": 35.45,
"alnum_prop": 0.5239774330042313,
"repo_name": "scottferg/web-console",
"id": "cb4d1814653c3831c3a486ba76dc8a496957af5c",
"size": "1418",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/db/backends/mysql/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "107585"
},
{
"name": "Python",
"bytes": "4062006"
}
],
"symlink_target": ""
} |
"""
Classes for the efficient drawing of large collections of objects that
share most properties, e.g., a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g., you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g., a large set of solid
line segments)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
from matplotlib import _path
import matplotlib.mlab as mlab
CIRCLE_AREA_FACTOR = 1.0 / np.sqrt(np.pi)
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *offset_position*: 'screen' (default) or 'data'
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *hatch*: None
* *zorder*: 1
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets). If offset_position is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates. If
offset_position is 'data', the offset is applied before the master
transform, i.e., the offsets are in data coordinates.
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(i.e., a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
# _offsets must be a Nx2 array!
_offsets.shape = (0, 2)
_transOffset = transforms.IdentityTransform()
_transforms = []
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds=None,
offsets=None,
transOffset=None,
norm=None, # optional for ScalarMappable
cmap=None, # ditto
pickradius=5.0,
hatch=None,
urls=None,
offset_position='screen',
zorder=1,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_pickradius(pickradius)
self.set_urls(urls)
self.set_hatch(hatch)
self.set_offset_position(offset_position)
self.set_zorder(zorder)
self._uniform_offsets = None
self._offsets = np.array([[0, 0]], np.float_)
if offsets is not None:
offsets = np.asanyarray(offsets)
offsets.shape = (-1, 2) # Make it Nx2
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._path_effects = None
self.update(kwargs)
self._paths = None
@staticmethod
def _get_value(val):
try:
return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try:
float(val[0])
except (TypeError, ValueError):
pass # raise below
else:
return val
raise TypeError('val must be a float or nonzero sequence of floats')
@staticmethod
def _get_bool(val):
if not cbook.iterable(val):
val = (val,)
try:
bool(val[0])
except (TypeError, IndexError):
raise TypeError('val must be a bool or nonzero sequence of them')
return val
def get_paths(self):
return self._paths
def set_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_offset_transform(self):
t = self._transOffset
if (not isinstance(t, transforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
return t
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asanyarray(offsets, np.float_)
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# get_path_collection_extents handles nan but not masked arrays
offsets.shape = (-1, 2) # Make it Nx2
if len(paths) and len(offsets):
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
else:
result = transforms.Bbox.null()
return result
def get_window_extent(self, renderer):
# TODO:check to ensure that this does not fail for
# cases other than scatter plot legend
return self.get_datalim(transforms.IdentityTransform())
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(list(zip(xs, ys)), path.codes))
if offsets.size > 0:
xs = self.convert_xunits(offsets[:, 0])
ys = self.convert_yunits(offsets[:, 1])
offsets = list(zip(xs, ys))
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path)
for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
# This might have changed an ndarray into a masked array.
transOffset = transOffset.get_affine()
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# Changing from a masked array to nan-filled ndarray
# is probably most efficient at this point.
return transform, transOffset, offsets, paths
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
self.update_scalarmappable()
transform, transOffset, offsets, paths = self._prepare_points()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_snap(self.get_snap())
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
# If the collection is made up of a single shape/color/stroke,
# it can be rendered once and blitted multiple times, using
# `draw_markers` rather than `draw_path_collection`. This is
# *much* faster for Agg, and results in smaller file sizes in
# PDF/SVG/PS.
trans = self.get_transforms()
facecolors = self.get_facecolor()
edgecolors = self.get_edgecolor()
do_single_path_optimization = False
if (len(paths) == 1 and len(trans) <= 1 and
len(facecolors) == 1 and len(edgecolors) == 1 and
len(self._linewidths) == 1 and
self._linestyles == [(None, None)] and
len(self._antialiaseds) == 1 and len(self._urls) == 1 and
self.get_hatch() is None):
if len(trans):
combined_transform = (transforms.Affine2D(trans[0]) +
transform)
else:
combined_transform = transform
extents = paths[0].get_extents(combined_transform)
width, height = renderer.get_canvas_width_height()
if (extents.width < width and
extents.height < height):
do_single_path_optimization = True
if do_single_path_optimization:
gc.set_foreground(tuple(edgecolors[0]))
gc.set_linewidth(self._linewidths[0])
gc.set_linestyle(self._linestyles[0])
gc.set_antialiased(self._antialiaseds[0])
gc.set_url(self._urls[0])
renderer.draw_markers(
gc, paths[0], combined_transform.frozen(),
mpath.Path(offsets), transOffset, tuple(facecolors[0]))
else:
renderer.draw_path_collection(
gc, transform.frozen(), paths,
self.get_transforms(), offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(),
self._linewidths, self._linestyles,
self._antialiaseds, self._urls,
self._offset_position)
gc.restore()
renderer.close_group(self.__class__.__name__)
def set_pickradius(self, pr):
self._pickradius = pr
def get_pickradius(self):
return self._pickradius
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible():
return False, {}
if self._picker is True: # the Boolean constant, not just nonzero or 1
pickradius = self._pickradius
else:
try:
pickradius = float(self._picker)
except TypeError:
# This should not happen if "contains" is called via
# pick, the normal route; the check is here in case
# it is called through some unanticipated route.
warnings.warn(
"Collection picker %s could not be converted to float"
% self._picker)
pickradius = self._pickradius
transform, transOffset, offsets, paths = self._prepare_points()
ind = _path.point_in_path_collection(
mouseevent.x, mouseevent.y, pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, pickradius <= 0,
self.get_offset_position())
return len(ind) > 0, dict(ind=ind)
def set_urls(self, urls):
if urls is None:
self._urls = [None, ]
else:
self._urls = urls
def get_urls(self):
return self._urls
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Unlike other properties such as linewidth and colors, hatching
can only be specified for the collection as a whole, not separately
for each member.
ACCEPTS: [ '/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
"""
self._hatch = hatch
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_offset_position(self, offset_position):
"""
Set how offsets are applied. If *offset_position* is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates.
If offset_position is 'data', the offset is applied before the
master transform, i.e., the offsets are in data coordinates.
"""
if offset_position not in ('screen', 'data'):
raise ValueError("offset_position must be 'screen' or 'data'")
self._offset_position = offset_position
def get_offset_position(self):
"""
Returns how offsets are applied for the collection. If
*offset_position* is 'screen', the offset is applied after the
master transform has been applied, that is, the offsets are in
screen coordinates. If offset_position is 'data', the offset
is applied before the master transform, i.e., the offsets are
in data coordinates.
"""
return self._offset_position
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None:
lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls) == 2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes' % ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'none', the patch will not be filled.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._is_filled = True
try:
if c.lower() == 'none':
self._is_filled = False
except AttributeError:
pass
if c is None:
c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == str('face'):
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color. If it is 'none', the patch boundary will not
be drawn.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._is_stroked = True
try:
if c.lower() == 'none':
self._is_stroked = False
except AttributeError:
pass
try:
if c.lower() == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
return
except AttributeError:
pass
if c is None:
c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != str('face'):
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None:
return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if not self.check_update("array"):
return
if self._is_filled:
self._facecolors = self.to_rgba(self._A, self._alpha)
elif self._is_stroked:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
self._hatch = other._hatch
# update_from for scalarmappable
self._A = other._A
self.norm = other.norm
self.cmap = other.cmap
# self.update_dict = other.update_dict # do we need to copy this? -JJL
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Collection="""\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
""")
class _CollectionWithSizes(Collection):
"""
Base class for collections that have an array of sizes.
"""
_factor = 1.0
def get_sizes(self):
"""
Returns the sizes of the elements in the collection. The
value represents the 'area' of the element.
Returns
-------
sizes : array
The 'area' of each element.
"""
return self._sizes
def set_sizes(self, sizes, dpi=72.0):
"""
Set the sizes of each member of the collection.
Parameters
----------
sizes : ndarray or None
The size to set for each element of the collection. The
value is the 'area' of the element.
dpi : float
The dpi of the canvas. Defaults to 72.0.
"""
if sizes is None:
self._sizes = np.array([])
self._transforms = np.empty((0, 3, 3))
else:
self._sizes = np.asarray(sizes)
self._transforms = np.zeros((len(self._sizes), 3, 3))
scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
self._transforms[:, 0, 0] = scale
self._transforms[:, 1, 1] = scale
self._transforms[:, 2, 2] = 1.0
@allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
Collection.draw(self, renderer)
class PathCollection(_CollectionWithSizes):
"""
This is the most basic :class:`Collection` subclass.
"""
@docstring.dedent_interpd
def __init__(self, paths, sizes=None, **kwargs):
"""
*paths* is a sequence of :class:`matplotlib.path.Path`
instances.
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_paths(paths)
self.set_sizes(sizes)
def set_paths(self, paths):
self._paths = paths
def get_paths(self):
return self._paths
class PolyCollection(_CollectionWithSizes):
@docstring.dedent_interpd
def __init__(self, verts, sizes=None, closed=True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_verts(verts, closed)
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if np.ma.isMaskedArray(verts):
verts = verts.astype(np.float_).filled(np.nan)
# This is much faster than having Path do it one at a time.
if closed:
self._paths = []
for xy in verts:
if len(xy):
if np.ma.isMaskedArray(xy):
xy = np.ma.concatenate([xy, xy[0:1]])
else:
xy = np.asarray(xy)
xy = np.concatenate([xy, xy[0:1]])
codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
codes[:] = mpath.Path.LINETO
codes[0] = mpath.Path.MOVETO
codes[-1] = mpath.Path.CLOSEPOLY
self._paths.append(mpath.Path(xy, codes))
else:
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
set_paths = set_verts
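    # Editor's sketch (hedged, not from the original module): a minimal
    # PolyCollection built from two triangles; `ax` is assumed to exist.
    #
    #     verts = [[(0, 0), (1, 0), (1, 1)], [(2, 2), (3, 2), (3, 3)]]
    #     poly = PolyCollection(verts, facecolors=['r', 'g'], closed=True)
    #     ax.add_collection(poly)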
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
@docstring.dedent_interpd
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [[(xmin, ymin),
(xmin, ymax),
(xmin + xwidth, ymax),
(xmin + xwidth, ymin),
(xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned. *kwargs* are
passed on to the collection.
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1] - xslice[0]))
collection = BrokenBarHCollection(
xranges, [ymin, ymax - ymin], **kwargs)
return collection
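    # Editor's sketch (hedged, not from the original module): typical use of
    # span_where; `x` and `y` are assumed to be 1D arrays of equal length and
    # `ax` an existing Axes.
    #
    #     bars = BrokenBarHCollection.span_where(
    #         x, ymin=0, ymax=1, where=(y > 0), facecolor='green', alpha=0.5)
    #     ax.add_collection(bars)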
class RegularPolyCollection(_CollectionWithSizes):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
_factor = CIRCLE_AREA_FACTOR
@docstring.dedent_interpd
def __init__(self,
numsides,
rotation=0,
sizes=(1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
@allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
self._transforms = [
transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
for x in self._transforms
]
Collection.draw(self, renderer)
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
def __init__(self, segments, # Can be None.
linewidths=None,
colors=None,
antialiaseds=None,
linestyles='solid',
offsets=None,
transOffset=None,
norm=None,
cmap=None,
pickradius=5,
zorder=2,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
            must be a sequence of RGBA tuples (arbitrary color
            strings, etc., are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
*zorder*
The zorder of the LineCollection. Default is 2
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` array
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (i.e., a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None:
colors = mpl.rcParams['lines.color']
if linewidths is None:
linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None:
antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = mcolors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
facecolors='none',
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
zorder=zorder,
**kwargs)
self.set_segments(segments)
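    # Editor's sketch (hedged, not from the original module): each segment is an
    # independent polyline and may have its own number of points.
    #
    #     segs = [[(0, 0), (1, 1)], [(0, 1), (1, 0), (2, 1)]]
    #     lc = LineCollection(segs, linewidths=2,
    #                         colors=[(1, 0, 0, 1), (0, 0, 1, 1)])
    #     ax.add_collection(lc)   # `ax` is assumed to exist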
def set_segments(self, segments):
if segments is None:
return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
set_paths = set_segments
def get_segments(self):
segments = []
for path in self._paths:
vertices = [vertex for vertex, _ in path.iter_segments()]
vertices = np.asarray(vertices)
segments.append(vertices)
return segments
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i % Noffs
segs[i] = segs[i] + offsets[io:io + 1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self.set_edgecolor(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
        cycle through the sequence.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class EventCollection(LineCollection):
'''
A collection of discrete events.
An event is a 1-dimensional value, usually the position of something along
an axis, such as time or length. Events do not have an amplitude. They
    are displayed as short parallel line segments.
'''
def __init__(self,
positions, # Can be None.
orientation=None,
lineoffset=0,
linelength=1,
linewidth=None,
color=None,
linestyle='solid',
antialiased=None,
**kwargs
):
"""
*positions*
a sequence of numerical values or a 1D numpy array. Can be None
*orientation* [ 'horizontal' | 'vertical' | None ]
defaults to 'horizontal' if not specified or None
*lineoffset*
a single numerical value, corresponding to the offset of the center
of the markers from the origin
*linelength*
a single numerical value, corresponding to the total height of the
marker (i.e. the marker stretches from lineoffset+linelength/2 to
lineoffset-linelength/2). Defaults to 1
*linewidth*
a single numerical value
*color*
            must be a sequence of RGBA tuples (arbitrary color
            strings, etc., are not allowed).
*linestyle* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
*antialiased*
            1 or 0
If *linewidth*, *color*, or *antialiased* is None, they
default to their rcParams setting, in sequence form.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` array
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (i.e., a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
**Example:**
.. plot:: mpl_examples/pylab_examples/eventcollection_demo.py
"""
segment = (lineoffset + linelength / 2.,
lineoffset - linelength / 2.)
if len(positions) == 0:
segments = []
elif hasattr(positions, 'ndim') and positions.ndim > 1:
            raise ValueError('if positions is an ndarray it cannot have '
                             'dimensionality greater than 1')
elif (orientation is None or orientation.lower() == 'none' or
orientation.lower() == 'horizontal'):
positions.sort()
segments = [[(coord1, coord2) for coord2 in segment] for
coord1 in positions]
self._is_horizontal = True
elif orientation.lower() == 'vertical':
positions.sort()
segments = [[(coord2, coord1) for coord2 in segment] for
coord1 in positions]
self._is_horizontal = False
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
LineCollection.__init__(self,
segments,
linewidths=linewidth,
colors=color,
antialiaseds=antialiased,
linestyles=linestyle,
**kwargs)
self._linelength = linelength
self._lineoffset = lineoffset
def get_positions(self):
'''
return an array containing the floating-point values of the positions
'''
segments = self.get_segments()
pos = 0 if self.is_horizontal() else 1
positions = []
for segment in segments:
positions.append(segment[0, pos])
return positions
def set_positions(self, positions):
'''
set the positions of the events to the specified value
'''
if positions is None or (hasattr(positions, 'len') and
len(positions) == 0):
self.set_segments([])
return
lineoffset = self.get_lineoffset()
linelength = self.get_linelength()
segment = (lineoffset + linelength / 2.,
lineoffset - linelength / 2.)
positions = np.asanyarray(positions)
positions.sort()
if self.is_horizontal():
segments = [[(coord1, coord2) for coord2 in segment] for
coord1 in positions]
else:
segments = [[(coord2, coord1) for coord2 in segment] for
coord1 in positions]
self.set_segments(segments)
def add_positions(self, position):
'''
add one or more events at the specified positions
'''
if position is None or (hasattr(position, 'len') and
len(position) == 0):
return
positions = self.get_positions()
positions = np.hstack([positions, np.asanyarray(position)])
self.set_positions(positions)
extend_positions = append_positions = add_positions
def is_horizontal(self):
'''
True if the eventcollection is horizontal, False if vertical
'''
return self._is_horizontal
def get_orientation(self):
'''
get the orientation of the event line, may be:
[ 'horizontal' | 'vertical' ]
'''
return 'horizontal' if self.is_horizontal() else 'vertical'
def switch_orientation(self):
'''
switch the orientation of the event line, either from vertical to
        horizontal or vice versa
'''
segments = self.get_segments()
for i, segment in enumerate(segments):
segments[i] = np.fliplr(segment)
self.set_segments(segments)
self._is_horizontal = not self.is_horizontal()
def set_orientation(self, orientation=None):
'''
set the orientation of the event line
[ 'horizontal' | 'vertical' | None ]
defaults to 'horizontal' if not specified or None
'''
if (orientation is None or orientation.lower() == 'none' or
orientation.lower() == 'horizontal'):
is_horizontal = True
elif orientation.lower() == 'vertical':
is_horizontal = False
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
if is_horizontal == self.is_horizontal():
return
self.switch_orientation()
def get_linelength(self):
'''
get the length of the lines used to mark each event
'''
return self._linelength
def set_linelength(self, linelength):
'''
set the length of the lines used to mark each event
'''
if linelength == self.get_linelength():
return
lineoffset = self.get_lineoffset()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._linelength = linelength
def get_lineoffset(self):
'''
get the offset of the lines used to mark each event
'''
return self._lineoffset
def set_lineoffset(self, lineoffset):
'''
set the offset of the lines used to mark each event
'''
if lineoffset == self.get_lineoffset():
return
linelength = self.get_linelength()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._lineoffset = lineoffset
def get_linewidth(self):
'''
get the width of the lines used to mark each event
'''
return self.get_linewidths()[0]
def get_linestyle(self):
'''
get the style of the lines used to mark each event
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
'''
return self.get_linestyles()
def get_color(self):
'''
get the color of the lines used to mark each event
'''
return self.get_colors()[0]
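    # Editor's sketch (hedged, not from the original module): events at three
    # x positions, drawn as short vertical segments centred on y = 0.5.
    #
    #     events = EventCollection([1.0, 2.5, 4.0], orientation='horizontal',
    #                              lineoffset=0.5, linelength=0.2)
    #     ax.add_collection(events)   # `ax` is assumed to exist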
class CircleCollection(_CollectionWithSizes):
"""
A collection of circles, drawn using splines.
"""
_factor = CIRCLE_AREA_FACTOR
@docstring.dedent_interpd
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
@docstring.dedent_interpd
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
lengths of first axes (e.g., major axis lengths)
*heights*: sequence
lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height'
| 'x' | 'y' | 'xy']
units in which majors and minors are given; 'width' and
'height' refer to the dimensions of the axes, while 'x'
and 'y' refer to the *offsets* data units. 'xy' differs
from all others in that the angle as plotted varies with
the aspect ratio, and equals the specified angle only when
the aspect ratio is unity. Hence it behaves the same as
the :class:`~matplotlib.patches.Ellipse` with
axes.transData as its transform.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self._widths = 0.5 * np.asarray(widths).ravel()
self._heights = 0.5 * np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() * (np.pi / 180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
def _set_transforms(self):
"""
Calculate transforms immediately before drawing.
"""
ax = self.axes
fig = self.figure
if self._units == 'xy':
sc = 1
elif self._units == 'x':
sc = ax.bbox.width / ax.viewLim.width
elif self._units == 'y':
sc = ax.bbox.height / ax.viewLim.height
elif self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
self._transforms = np.zeros((len(self._widths), 3, 3))
widths = self._widths * sc
heights = self._heights * sc
sin_angle = np.sin(self._angles)
cos_angle = np.cos(self._angles)
self._transforms[:, 0, 0] = widths * cos_angle
self._transforms[:, 0, 1] = heights * -sin_angle
self._transforms[:, 1, 0] = widths * sin_angle
self._transforms[:, 1, 1] = heights * cos_angle
self._transforms[:, 2, 2] = 1.0
_affine = transforms.Affine2D
if self._units == 'xy':
m = ax.transData.get_affine().get_matrix().copy()
m[:2, 2:] = 0
self.set_transform(_affine(m))
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
Collection.draw(self, renderer)
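    # Editor's sketch (hedged, not from the original module): with units='xy'
    # the widths/heights are in data units, so the ellipses rescale with the
    # axes; `ax` is assumed to exist.
    #
    #     ec = EllipseCollection(widths=[0.2], heights=[0.1], angles=[30],
    #                            units='xy', offsets=[(1.0, 1.0)],
    #                            transOffset=ax.transData)
    #     ax.add_collection(ec)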
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (i.e., a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.get_fill():
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
linewidths = [p.get_linewidth() for p in patches]
linestyles = [p.get_linestyle() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds)
else:
Collection.__init__(self, **kwargs)
self.set_paths(patches)
def set_paths(self, patches):
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
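    # Editor's sketch (hedged, not from the original module): mapping scalar
    # data onto a heterogeneous set of patches; `patches`, `cm` and `np` are
    # assumed to be available in the caller's scope.
    #
    #     pc = PatchCollection(patches, cmap=cm.jet, alpha=0.4)
    #     pc.set_array(np.arange(len(patches)))   # scalar data -> face colors
    #     ax.add_collection(pc)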
class TriMesh(Collection):
"""
Class for the efficient drawing of a triangular mesh using
Gouraud shading.
A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
object.
"""
def __init__(self, triangulation, **kwargs):
Collection.__init__(self, **kwargs)
self._triangulation = triangulation
self._shading = 'gouraud'
self._is_filled = True
self._bbox = transforms.Bbox.unit()
# Unfortunately this requires a copy, unless Triangulation
# was rewritten.
xy = np.hstack((triangulation.x.reshape(-1, 1),
triangulation.y.reshape(-1, 1)))
self._bbox.update_from_data_xy(xy)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(self._triangulation)
@staticmethod
def convert_mesh_to_paths(tri):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support meshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][..., np.newaxis],
tri.y[triangles][..., np.newaxis]), axis=2)
return [Path(x) for x in verts]
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
# Get a list of triangles and the color at each vertex.
tri = self._triangulation
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][..., np.newaxis],
tri.y[triangles][..., np.newaxis]), axis=2)
self.update_scalarmappable()
colors = self._facecolors[triangles]
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
gc.restore()
renderer.close_group(self.__class__.__name__)
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
    A quadrilateral mesh is represented by a (((*meshWidth* + 1) *
    (*meshHeight* + 1)) x 2) numpy array *coordinates*, where each row is
    the *x* and *y* coordinates of one of the vertices.
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
*shading* may be 'flat', or 'gouraud'
"""
def __init__(self, meshWidth, meshHeight, coordinates,
antialiased=True, shading='flat', **kwargs):
Collection.__init__(self, **kwargs)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._antialiased = antialiased
self._shading = shading
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape(
(meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
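    # Editor's sketch (hedged, not from the original module): a worked layout
    # for meshWidth=2, meshHeight=1, i.e. (2+1)*(1+1) = 6 vertices given in
    # row-major mesh order, which yields 2 quadrilaterals.
    #
    #     coords = np.array([[0, 0], [1, 0], [2, 0],
    #                        [0, 1], [1, 1], [2, 1]], dtype=float)
    #     mesh = QuadMesh(2, 1, coords)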
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
def get_datalim(self, transData):
return (self.get_transform() - transData).transform_bbox(self._bbox)
@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1:],
c[1:, 1:],
c[1:, 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of triangles, each point
with its own color. This is useful for experiments using
        `draw_gouraud_triangles`.
"""
if ma.isMaskedArray(coordinates):
p = coordinates.data
else:
p = coordinates
p_a = p[:-1, :-1]
p_b = p[:-1, 1:]
p_c = p[1:, 1:]
p_d = p[1:, :-1]
p_center = (p_a + p_b + p_c + p_d) / 4.0
triangles = np.concatenate((
p_a, p_b, p_center,
p_b, p_c, p_center,
p_c, p_d, p_center,
p_d, p_a, p_center,
), axis=2)
triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
c_a = c[:-1, :-1]
c_b = c[:-1, 1:]
c_c = c[1:, 1:]
c_d = c[1:, :-1]
c_center = (c_a + c_b + c_c + c_d) / 4.0
colors = np.concatenate((
c_a, c_b, c_center,
c_b, c_c, c_center,
c_c, c_d, c_center,
c_d, c_a, c_center,
), axis=2)
colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
return triangles, colors
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
if self.have_units():
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = list(zip(xs, ys))
offsets = np.asarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
self.update_scalarmappable()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
if self._shading == 'gouraud':
triangles, colors = self.convert_mesh_to_triangles(
self._meshWidth, self._meshHeight, coordinates)
renderer.draw_gouraud_triangles(
gc, triangles, colors, transform.frozen())
else:
renderer.draw_quad_mesh(
gc, transform.frozen(), self._meshWidth, self._meshHeight,
coordinates, offsets, transOffset, self.get_facecolor(),
self._antialiased, self.get_edgecolors())
gc.restore()
renderer.close_group(self.__class__.__name__)
patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
'RegularPolyCollection', 'PathCollection',
'StarPolygonCollection', 'PatchCollection',
'CircleCollection', 'Collection',):
docstring.interpd.update({k: patchstr})
docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
| {
"content_hash": "c5dcbf23255eb5f705f7a0e06bb8883d",
"timestamp": "",
"source": "github",
"line_count": 1838,
"max_line_length": 81,
"avg_line_length": 34.33188248095756,
"alnum_prop": 0.5653703527621945,
"repo_name": "miloharper/neural-network-animation",
"id": "e84bd3e578d3fbc58d40e79de6bbb225870e6051",
"size": "63102",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "matplotlib/collections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "HTML",
"bytes": "4000"
},
{
"name": "JavaScript",
"bytes": "24260"
},
{
"name": "Python",
"bytes": "4443606"
}
],
"symlink_target": ""
} |
"""curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
    curses.initscr()
...
"""
__revision__ = "$Id: __init__.py,v 1.4 2001/04/05 16:08:41 akuchling Exp $"
from _curses import *
from curses.wrapper import wrapper
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
import _curses, curses
stdscr = _curses.initscr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
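# Editor's sketch (hedged, not part of the original module): once the wrapper
# has run, the copied constants are reachable through the package, e.g.
#
#     import curses
#     stdscr = curses.initscr()
#     stdscr.addch(0, 0, curses.ACS_ULCORNER)   # ACS_* exists only after initscr()
#     curses.endwin()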
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from has_key import has_key
| {
"content_hash": "a34a290a1f31f612a5c08e22f96f492d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 30.38888888888889,
"alnum_prop": 0.6849482023156612,
"repo_name": "MalloyPower/parsing-python",
"id": "9691e1dae06075d1341152087ffe32bbda293525",
"size": "1641",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.2/Lib/curses/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_blacksun_medium_s03_tier4.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "73cff0c2e6b4573229c47a0910cee194",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 21.307692307692307,
"alnum_prop": 0.6714801444043321,
"repo_name": "obi-two/Rebelion",
"id": "4266010a6da5ce6e884de5a414d4d31243128f81",
"size": "422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/ship/shared_blacksun_medium_s03_tier4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models.base_program_cycle import BaseProgramCycle
class ProgramCycle(BaseProgramCycle):
class Meta(BaseProgramCycle.Meta):
swappable = swapper.swappable_setting(BaseProgramCycle.Meta.app_label,
"ProgramCycle")
| {
"content_hash": "22a1c797f3fe60cebda7d02ae3ed7799",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 32.09090909090909,
"alnum_prop": 0.6940509915014165,
"repo_name": "masschallenge/django-accelerator",
"id": "f431ca01e4129eb038152b9d5ce53c676f8fe8db",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator/models/program_cycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_waf_signature
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_waf_signature.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_waf_signature_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'waf_signature': {
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
expected_data = {
'desc': 'test_value_3',
'id': '4'
}
set_method_mock.assert_called_with('waf', 'signature', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_waf_signature_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'waf_signature': {
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
expected_data = {
'desc': 'test_value_3',
'id': '4'
}
set_method_mock.assert_called_with('waf', 'signature', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_waf_signature_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'waf_signature': {
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
delete_method_mock.assert_called_with('waf', 'signature', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_waf_signature_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'waf_signature': {
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
delete_method_mock.assert_called_with('waf', 'signature', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_waf_signature_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'waf_signature': {
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
expected_data = {
'desc': 'test_value_3',
'id': '4'
}
set_method_mock.assert_called_with('waf', 'signature', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_waf_signature_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'waf_signature': {
'random_attribute_not_valid': 'tag',
'desc': 'test_value_3',
'id': '4'
},
'vdom': 'root'}
is_error, changed, response = fortios_waf_signature.fortios_waf(input_data, fos_instance)
expected_data = {
'desc': 'test_value_3',
'id': '4'
}
set_method_mock.assert_called_with('waf', 'signature', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| {
"content_hash": "f9c24d92f12826faefce939fefbdc508",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 142,
"avg_line_length": 34.55440414507772,
"alnum_prop": 0.6437246963562753,
"repo_name": "thaim/ansible",
"id": "2dfefd26262a39e43a7ff60638d5cbb86d8bc3f0",
"size": "7365",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_waf_signature.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""Împăratul a primit serie de mesaje importante pe care este
important să le descifreze cât mai repede.
Din păcate mesagerul nu a apucat să îi spună împăratul care au fost
cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi
misterul.
Informații:
În criptografie, cifrul lui Caesar este o metodă simplă de a cripta
un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată
la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut
"""
# there were two ways to solve the print-parentheses problem;
# I preferred this one in order to also avoid the potential issues
# you said you were going to pester us with ;)
from __future__ import print_function
LETTERS = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
# the letter will always be found in the "LETTERS" string,
# so the wrap-around is sufficiently
# represented by listing a-z twice
def shift_letter(let, number):
"""Shifts a letter by number places in LETTERS"""
    if let.isalpha():
        # only letters are processed
        return LETTERS[ord(let) - 97 + number]
        # return the letter <number> places further along in LETTERS
    else:
        return let
        # if it is not a letter, return the original character
def decripteaza(mesaj, number):
"""Decrypts every line in <mesaj>"""
new_msg = ""
for char in mesaj:
new_msg += shift_letter(char, number)
if "ave" in new_msg:
print(new_msg)
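# Editor's worked example (hedged, not part of the original solution): with a
# shift of 23 (equivalently, shifting back by 3), "dyh fdhvdu" decodes to
# "ave caesar", which is why decripteaza() keeps candidates containing "ave".
#
#     shift_letter("d", 23)          # -> "a"
#     decripteaza("dyh fdhvdu", 23)  # prints "ave caesar"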
def main():
"""Have a main docstring, pylint"""
try:
fisier = open("mesaje.secret", "r")
mesaje = fisier.read()
fisier.close()
except IOError:
print("Nu am putut obține mesajele.")
return
for mesaj in mesaje.splitlines():
for i in range(26):
decripteaza(mesaj, i)
if __name__ == "__main__":
main()
| {
"content_hash": "f26d11dc4381e5c29bb61c116ec2b2b2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 74,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6796703296703297,
"repo_name": "alexandrucoman/labs",
"id": "ae68fba7105cb935cc993e9c8b8340444eb66a26",
"size": "1892",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/solutii/alex_mitan/caesar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "3108"
},
{
"name": "Python",
"bytes": "272805"
},
{
"name": "Shell",
"bytes": "10114"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from ._common import layer_register
__all__ = ['SoftMax']
@layer_register()
def SoftMax(x, use_temperature=False, temperature_init=1.0):
"""
A SoftMax layer (no linear projection) with optional temperature
:param x: a 2D tensor
"""
if use_temperature:
t = tf.get_variable('invtemp', [],
initializer=tf.constant_initializer(1.0 / float(temperature_init)))
x = x * t
return tf.nn.softmax(x, name='output')
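# Editor's sketch (hedged, not part of the original module): with
# use_temperature=True the layer computes softmax(x / T) through a learnable
# variable storing 1/T.  The call below assumes the usual tensorpack convention
# of passing a variable-scope name as the first argument.
#
#     logits = tf.placeholder(tf.float32, [None, 10])
#     probs = SoftMax('softmax', logits, use_temperature=True,
#                     temperature_init=2.0)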
| {
"content_hash": "33770e1142f8bf72d1b20bd707e1b14d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 30.375,
"alnum_prop": 0.6378600823045267,
"repo_name": "yinglanma/AI-project",
"id": "a5ebd049a2d301635497174aeb1e7113a3a9acad",
"size": "593",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorpack/models/softmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "319384"
}
],
"symlink_target": ""
} |
import unittest
from nymms.schemas import Result, StateRecord, types
from nymms.reactor import filters
class TestFilters(unittest.TestCase):
def setUp(self):
self.result = Result({'id': 'test:filter',
'state': types.STATE_OK,
'state_type': types.STATE_TYPE_HARD})
self.result.validate()
self.record = StateRecord({
'id': 'test:filter',
'state': types.STATE_OK,
'state_type': types.STATE_TYPE_HARD})
self.record.validate()
def test_hard_state(self):
self.assertTrue(filters.hard_state(self.result, self.record))
self.result.state_type = types.STATE_TYPE_SOFT
self.result.validate()
self.assertFalse(filters.hard_state(self.result, self.record))
def test_ok_state(self):
self.assertTrue(filters.ok_state(self.result, self.record))
self.result.state = types.STATE_WARNING
self.result.validate()
self.assertFalse(filters.ok_state(self.result, self.record))
def test_not_ok_state(self):
self.assertFalse(filters.not_ok_state(self.result, self.record))
self.result.state = types.STATE_WARNING
self.result.validate()
self.assertTrue(filters.not_ok_state(self.result, self.record))
def test_warning_state(self):
self.assertFalse(filters.warning_state(self.result, self.record))
self.result.state = types.STATE_WARNING
self.result.validate()
self.assertTrue(filters.warning_state(self.result, self.record))
def test_critical_state(self):
self.assertFalse(filters.critical_state(self.result, self.record))
self.result.state = types.STATE_CRITICAL
self.result.validate()
self.assertTrue(filters.critical_state(self.result, self.record))
def test_unknown_state(self):
self.assertFalse(filters.unknown_state(self.result, self.record))
self.result.state = types.STATE_UNKNOWN
self.result.validate()
self.assertTrue(filters.unknown_state(self.result, self.record))
def test_changed_state(self):
f = filters.changed_state
self.assertFalse(f(self.result, self.record))
self.assertTrue(f(self.result, None))
self.result.state = types.STATE_CRITICAL
self.result.validate()
self.assertTrue(f(self.result, self.record))
self.result.state = types.STATE_OK
self.result.state_type = types.STATE_TYPE_SOFT
self.result.validate()
self.assertTrue(f(self.result, self.record))
| {
"content_hash": "237eea373dfe3af9be088e0fbd8339dc",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 35.13513513513514,
"alnum_prop": 0.6484615384615384,
"repo_name": "cloudtools/nymms",
"id": "499e4b300a741b75a6f0d46d08524dc115a37eaa",
"size": "2600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nymms/reactor/filters/tests/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "139423"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from core.models import BaseManager, Subcategory, Detail
from . import DETAIL_TYPE
class FormsTypeManager(BaseManager):
def get_queryset(self):
q = super(FormsTypeManager, self).get_queryset()
return q.filter(type=DETAIL_TYPE)
class FormSubcategory(Subcategory):
objects = FormsTypeManager()
class Meta:
proxy = True
verbose_name = 'Form subcategory'
verbose_name_plural = 'Form subcategories'
def save(self, *args, **kwargs):
self.type = DETAIL_TYPE
super(FormSubcategory, self).save(*args, **kwargs)
class FormDetail(Detail):
objects = FormsTypeManager()
class Meta:
proxy = True
verbose_name = 'Form detail'
verbose_name_plural = 'Form details'
def save(self, *args, **kwargs):
self.type = DETAIL_TYPE
super(FormDetail, self).save(*args, **kwargs) | {
"content_hash": "2be0a32e237e50bfa6a6492e6c2f7aa0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 58,
"avg_line_length": 23.794871794871796,
"alnum_prop": 0.6519396551724138,
"repo_name": "michaupl/materialsapp",
"id": "fbe8ac5e3375d6bd1ad1d297d19b1019f19f6bd0",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6799"
},
{
"name": "JavaScript",
"bytes": "9273"
},
{
"name": "Python",
"bytes": "213454"
}
],
"symlink_target": ""
} |
""" Humans.txt generator, based on work done on Kuma.
https://github.com/mozilla/kuma/blob/master/apps/humans/models.py
More info about humans.txt here http://humanstxt.org/"""
from django.apps import AppConfig
default_app_config = 'mozillians.humans.HumansConfig'
class HumansConfig(AppConfig):
name = 'mozillians.humans'
| {
"content_hash": "ef1053bde793dea88cdcb136a7fb0af1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.755223880597015,
"repo_name": "chirilo/mozillians",
"id": "023ceac427503eb522a545c1f3624c6a7e605c55",
"size": "335",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "mozillians/humans/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "206029"
},
{
"name": "HTML",
"bytes": "160644"
},
{
"name": "JavaScript",
"bytes": "90367"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "9149147"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
} |
"""A utility tool to run pnacl-translate for all archtectures.
Example usage:
The following command generates stripped nexefile_arm.nexe and
nexefile_x86_32.nexe and nexefile_x86_64.nexe.
python pnacl_translate.py --command=/path/to/toolchain/linux_pnacl \
--input=/path/to/pexefile --output_base=/path/to/nexefile \
--configuration=Release
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
def Translate(toolchain_root, input_file, output_base):
"""Translates the input file for three architectures."""
targets = (('arm', 'arm'), ('x86-32', 'x86_32'), ('x86-64', 'x86_64'))
translate_command = os.path.join(toolchain_root, 'bin/pnacl-translate')
for target in targets:
cmd = (translate_command, '--allow-llvm-bitcode-input', '-arch', target[0],
input_file, '-o', '%s_%s.nexe' % (output_base, target[1]))
print 'Running: ' + ' '.join(cmd)
if subprocess.Popen(cmd).wait() != 0:
print >> sys.stderr, 'ERROR: ' + ' '.join(cmd)
raise RuntimeError('Translate Error')
print 'Done: ' + ' '.join(cmd)
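  # Editor's note (hedged, not part of the original script): each iteration is
  # equivalent to running, e.g. for ARM:
  #   <toolchain_root>/bin/pnacl-translate --allow-llvm-bitcode-input \
  #       -arch arm <input> -o <output_base>_arm.nexe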
def StripAndTranslate(toolchain_root, input_file, output_base):
"""Strips and translates the input file for three architectures."""
strip_command = os.path.join(toolchain_root, 'bin/pnacl-strip')
try:
temp_dir = tempfile.mkdtemp()
temp_file_base = os.path.join(temp_dir, 'stripped')
cmd = (strip_command, input_file, '-o', temp_file_base)
print 'Running: ' + ' '.join(cmd)
if subprocess.Popen(cmd).wait() != 0:
print >> sys.stderr, 'ERROR: ' + ' '.join(cmd)
raise RuntimeError('Strip Error')
print 'Done: ' + ' '.join(cmd)
Translate(toolchain_root, temp_file_base, temp_file_base)
targets = ('arm', 'x86_32', 'x86_64')
for target in targets:
cmd = (strip_command, '%s_%s.nexe' % (temp_file_base, target),
'-o', '%s_%s.nexe' % (output_base, target))
print 'Running: ' + ' '.join(cmd)
if subprocess.Popen(cmd).wait() != 0:
print >> sys.stderr, 'ERROR: ' + ' '.join(cmd)
raise RuntimeError('Strip Error')
print 'Done: ' + ' '.join(cmd)
finally:
shutil.rmtree(temp_dir)
def main():
"""Translate pexe file to x86-32 and x86-64 and arm nexe files."""
parser = optparse.OptionParser(usage='Usage: %prog')
parser.add_option('--toolchain_root', dest='toolchain_root',
help='pnacl toolchain root path')
parser.add_option('--input', dest='input',
help='input pexe file')
parser.add_option('--output_base', dest='output_base',
help='output base path')
parser.add_option('--configuration', dest='configuration',
help='build configuration')
(options, _) = parser.parse_args()
if not options.toolchain_root:
print >> sys.stderr, 'Error: toolchain_root is not set.'
sys.exit(1)
if not options.input:
print >> sys.stderr, 'Error: input is not set.'
sys.exit(1)
if not options.output_base:
print >> sys.stderr, 'Error: output_base is not set.'
sys.exit(1)
if options.configuration == 'Release':
return StripAndTranslate(options.toolchain_root,
options.input,
options.output_base)
else:
return Translate(options.toolchain_root,
options.input,
options.output_base)
if __name__ == '__main__':
main()
| {
"content_hash": "9018526e48627d9b55970c69c1da504e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 35.8125,
"alnum_prop": 0.6175101803374055,
"repo_name": "takahashikenichi/mozc",
"id": "e4008d197f0f14c0f0d7f3a3ea47960d46d67c56",
"size": "4996",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/chrome/nacl/pnacl_translate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "200335"
},
{
"name": "C++",
"bytes": "10808666"
},
{
"name": "CSS",
"bytes": "26088"
},
{
"name": "Emacs Lisp",
"bytes": "80074"
},
{
"name": "HTML",
"bytes": "266980"
},
{
"name": "Java",
"bytes": "2751856"
},
{
"name": "JavaScript",
"bytes": "919906"
},
{
"name": "Makefile",
"bytes": "3754"
},
{
"name": "Objective-C",
"bytes": "34833"
},
{
"name": "Objective-C++",
"bytes": "227200"
},
{
"name": "Protocol Buffer",
"bytes": "112300"
},
{
"name": "Python",
"bytes": "1056960"
},
{
"name": "QMake",
"bytes": "861"
},
{
"name": "Shell",
"bytes": "9928"
},
{
"name": "Yacc",
"bytes": "2104"
}
],
"symlink_target": ""
} |
import sys
import re
import optparse
from ctypes import *
"""
This script will use the prototypes from "checkdocs.py -s" to concoct
a 1:1 Python wrapper for Allegro.
"""
class _AL_UTF8String:
pass
class Allegro:
def __init__(self):
self.types = {}
self.functions = {}
self.constants = {}
def add_struct(self, name):
x = type(name, (Structure, ), {})
self.types[name] = x
def add_union(self, name):
x = type(name, (Union, ), {})
self.types[name] = x
def get_type(self, ptype):
conversion = {
"bool": c_bool,
"_Bool": c_bool,
"char": c_byte,
"unsignedchar": c_ubyte,
"int": c_int,
"unsigned": c_uint,
"unsignedint": c_uint,
"int16_t": c_int16,
"uint16_t": c_uint16,
"int32_t": c_int32,
"uint32_t": c_uint32,
"int64_t": c_int64,
"uint64_t": c_uint64,
"uintptr_t": c_void_p,
"intptr_t": c_void_p,
"GLuint": c_uint,
"unsignedlong": c_ulong,
"long": c_long,
"size_t": c_size_t,
"off_t": c_int64,
"time_t": c_int64,
"va_list": c_void_p,
"float": c_float,
"double": c_double,
"al_fixed": c_int,
"HWND": c_void_p,
"char*": _AL_UTF8String,
# hack: this probably shouldn't be in the public docs
"postprocess_callback_t": c_void_p,
}
ptype = re.sub(r"\bstruct|union\b", "", ptype)
ptype = re.sub(r"\bconst\b", "", ptype)
ptype = re.sub(r"\extern\b", "", ptype)
ptype = re.sub(r"\__inline__\b", "", ptype)
ptype = re.sub(r"\s+", "", ptype)
if ptype.endswith("*"):
if ptype in conversion:
return conversion[ptype]
t = ptype[:-1]
if t in self.types:
return POINTER(self.types[t])
return c_void_p
elif ptype in self.types:
return self.types[ptype]
else:
try:
return conversion[ptype]
except KeyError:
print("Type Error:" + str(ptype))
return None
def parse_funcs(self, funcs):
"""
Go through all documented functions and add their prototypes
as Python functions.
The file should have been generated by Allegro's documentation
generation scripts.
"""
for func in funcs:
name, proto = func.split(":", 1)
if not name.startswith("al_"):
continue
proto = proto.strip()
name = name[:-2]
if proto.startswith("enum"):
continue
if proto.startswith("typedef"):
continue
if "=" in proto:
continue
if proto.startswith("#"):
continue
funcstart = proto.find(name)
funcend = funcstart + len(name)
ret = proto[:funcstart].rstrip()
params = proto[funcend:].strip(" ;")
if params[0] != "(" or params[-1] != ")":
print("Error:")
print(params)
continue
params2 = params[1:-1]
# remove callback argument lists
balance = 0
params = ""
for c in params2:
if c == ")":
balance -= 1
if balance == 0:
params += c
if c == "(":
balance += 1
params = params.split(",")
plist = []
for param in params:
param = re.sub(r"\bconst\b", "", param)
param = param.strip()
if param == "void":
continue
if param == "":
continue
if param == "...":
continue
# treat arrays as a void pointer, for now
if param.endswith("]") or param.endswith("*"):
plist.append(c_void_p)
continue
# treat callbacks as a void pointer, for now
if param.endswith(")"):
plist.append(c_void_p)
continue
mob = re.match("^.*?(\w+)$", param)
if mob:
pnamepos = mob.start(1)
if pnamepos == 0:
# Seems the parameter is not named
pnamepos = len(param)
else:
print(params)
print(proto)
print("")
continue
ptype = param[:pnamepos]
ptype = self.get_type(ptype)
plist.append(ptype)
f = type("", (object, ), {"restype": c_int})
if not ret.endswith("void"):
f.restype = self.get_type(ret)
try:
f.argtypes = plist
except TypeError, e:
print(e)
print(name)
print(plist)
self.functions[name] = f
def parse_protos(self, filename):
protos = []
unions = []
funcs = []
# first pass: create all structs, but without fields
for line in open(filename):
name, proto = line.split(":", 1)
proto = proto.lstrip()
if name.endswith("()"):
funcs.append(line)
continue
# anonymous structs have no name at all
if name and not name.startswith("ALLEGRO_"):
continue
if name == "ALLEGRO_OGL_EXT_API":
continue
if proto.startswith("union") or\
proto.startswith("typedef union"):
self.add_union(name)
unions.append((name, proto))
elif proto.startswith("struct") or\
proto.startswith("typedef struct"):
self.add_struct(name)
protos.append((name, proto))
elif proto.startswith("enum") or\
proto.startswith("typedef enum"):
if name:
self.types[name] = c_int
protos.append(("", proto))
elif proto.startswith("#define"):
if not name.startswith("_") and not name.startswith("GL_"):
i = eval(proto.split(None, 2)[2])
self.constants[name] = i
else:
# actual typedef
mob = re.match("typedef (.*) " + name, proto)
if mob:
t = mob.group(1)
self.types[name] = self.get_type(t.strip())
else:
# Probably a function pointer
self.types[name] = c_void_p
protos += unions
# second pass: fill in fields
for name, proto in protos:
bo = proto.find("{")
if bo == -1:
continue
bc = proto.rfind("}")
braces = proto[bo + 1:bc]
if proto.startswith("enum") or \
proto.startswith("typedef enum"):
fields = braces.split(",")
i = 0
for field in fields:
if "=" in field:
fname, val = field.split("=", 1)
fname = fname.strip()
try:
i = int(eval(val, globals(), self.constants))
except NameError:
i = val
else:
fname = field.strip()
if not fname:
continue
self.constants[fname] = i
try:
i += 1
except TypeError:
pass
continue
balance = 0
fields = [""]
for c in braces:
if c == "{":
balance += 1
if c == "}":
balance -= 1
if c == ";" and balance == 0:
fields.append("")
else:
fields[-1] += c
flist = []
for field in fields:
if not field:
continue
# add function pointer as void pointer
mob = re.match(".*?\(\*(\w+)\)", field)
if mob:
flist.append((mob.group(1), "c_void_p"))
continue
# add any pointer as void pointer
mob = re.match(".*?\*(\w+)$", field)
if mob:
flist.append((mob.group(1), "c_void_p"))
continue
# add an array
mob = re.match("(.*)( \w+)\[(.*?)\]$", field)
if mob:
# this is all a hack
n = 0
ftype = mob.group(1)
if ftype.startswith("struct"):
if ftype == "struct {float axis[3];}":
t = "c_float * 3"
else:
print("Error: Can't parse " + ftype + " yet.")
t = None
else:
n = mob.group(3)
# something in A5 uses a 2d array
if "][" in n:
n = n.replace("][", " * ")
# something uses a division expression
if "/" in n:
n = "(" + n.replace("/", "//") + ")"
t = self.get_type(ftype).__name__ + " * " + n
fname = mob.group(2)
flist.append((fname, t))
continue
vars = field.split(",")
mob = re.match("\s*(.*?)\s+(\w+)\s*$", vars[0])
t = self.get_type(mob.group(1))
vname = mob.group(2)
if t is not None and vname is not None:
flist.append((vname, t.__name__))
for v in vars[1:]:
flist.append((v.strip(), t.__name__))
else:
print("Error: " + str(vars))
try:
self.types[name].my_fields = flist
except AttributeError:
print(name, flist)
self.parse_funcs(funcs)
def main():
p = optparse.OptionParser()
p.add_option("-o", "--output", help="location of generated file")
p.add_option("-p", "--protos", help="A file with all " +
"prototypes to generate Python wrappers for, one per line. "
"Generate it with docs/scripts/checkdocs.py -p")
p.add_option("-t", "--type", help="the library type to " +
"use, e.g. debug")
p.add_option("-v", "--version", help="the library version to " +
"use, e.g. 5.1")
options, args = p.parse_args()
if not options.protos:
p.print_help()
return
al = Allegro()
al.parse_protos(options.protos)
f = open(options.output, "w") if options.output else sys.stdout
release = options.type
version = options.version
f.write(r"""# Generated by generate_python_ctypes.py.
import os, platform, sys
from ctypes import *
from ctypes.util import *
# You must adjust this function to point ctypes to the A5 DLLs you are
# distributing.
_dlls = []
def _add_dll(name):
release = "%(release)s"
if os.name == "nt":
release = "%(release)s-%(version)s"
# Under Windows, DLLs are found in the current directory, so this
# would be an easy way to keep all your DLLs in a sub-folder.
# os.chdir("dlls")
path = find_library(name + release)
if not path:
if os.name == "mac":
path = name + release + ".dylib"
elif os.name == "nt":
path = name + release + ".dll"
elif os.name == "posix":
if platform.mac_ver()[0]:
path = name + release + ".dylib"
else:
path = "lib" + name + release + ".so"
else:
sys.stderr.write("Cannot find library " + name + "\n")
# In most cases, you actually don't want the above and instead
# use the exact filename within your game distribution, possibly
# even within a .zip file.
# if not os.path.exists(path):
# path = "dlls/" + path
try:
# RTLD_GLOBAL is required under OSX for some reason (?)
_dlls.append(CDLL(path, RTLD_GLOBAL))
except OSError:
# No need to fail here, might just be one of the addons.
pass
# os.chdir("..")
_add_dll("allegro")
_add_dll("allegro_acodec")
_add_dll("allegro_audio")
_add_dll("allegro_primitives")
_add_dll("allegro_color")
_add_dll("allegro_font")
_add_dll("allegro_ttf")
_add_dll("allegro_image")
_add_dll("allegro_dialog")
_add_dll("allegro_memfile")
_add_dll("allegro_physfs")
_add_dll("allegro_shader")
_add_dll("allegro_main")
_add_dll("allegro_monolith")
# We don't have information ready which A5 function is in which DLL,
# so we just try them all.
def _dll(func, ret, params):
for dll in _dlls:
try:
f = dll[func]
f.restype = ret
f.argtypes = params
return f
except AttributeError: pass
sys.stderr.write("Cannot find function " + func + "\n")
return lambda *args: None
# In Python3, all Python strings are unicode so we have to convert to
# UTF8 byte strings before passing to Allegro.
if sys.version_info[0] > 2:
class _AL_UTF8String:
def from_param(x):
return x.encode("utf8")
else:
_AL_UTF8String = c_char_p
""" % locals())
postpone = []
for name, val in sorted(al.constants.items()):
try:
if isinstance(val, str):
val = int(eval(val, globals(), al.constants))
f.write(name + " = " + str(val) + "\n")
except:
postpone.append((name, val))
for name, val in postpone:
f.write(name + " = " + val + "\n")
structs = set()
# output everything except structs and unions
for name, x in sorted(al.types.items()):
if not name:
continue
base = x.__bases__[0]
if base != Structure and base != Union:
f.write(name + " = " + x.__name__ + "\n")
else:
structs.add(name)
# order structs and unions by their dependencies
structs_list = []
remaining = set(structs)
while remaining:
for name in sorted(remaining):
ok = True
x = al.types[name]
if hasattr(x, "my_fields"):
for fname, ftype in x.my_fields:
if " " in ftype:
ftype = ftype.split()[0]
if ftype in structs and ftype in remaining:
ok = False
break
if ok:
structs_list.append(name)
remaining.remove(name)
for name in structs_list:
x = al.types[name]
base = x.__bases__[0]
f.write("class " + name + "(" + base.__name__ + "):\n")
if hasattr(x, "my_fields"):
f.write(" _fields_ = [\n")
for fname, ftype in x.my_fields:
f.write(" (\"" + fname + "\", " + ftype + "),\n")
f.write(" ]\n")
else:
f.write(" pass\n")
pt = POINTER(x)
f.write("%s = POINTER(%s)\n" % (pt.__name__, name))
for name, x in sorted(al.functions.items()):
try:
line = name + " = _dll(\"" + name + "\", "
line += x.restype.__name__ + ", "
line += "[" + (", ".join([a.__name__ for a in x.argtypes])) +\
"])\n"
f.write(line)
except AttributeError as e:
print("Ignoring " + name + " because of errors (" + str(e) + ").")
# some stuff the automated parser doesn't pick up
f.write(r"""
ALLEGRO_VERSION_INT = \
((ALLEGRO_VERSION << 24) | (ALLEGRO_SUB_VERSION << 16) | \
(ALLEGRO_WIP_VERSION << 8) | ALLEGRO_RELEASE_NUMBER)
""")
f.write(r"""
# work around bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36834
if os.name == "nt":
def al_map_rgba_f(r, g, b, a): return ALLEGRO_COLOR(r, g, b, a)
def al_map_rgb_f(r, g, b): return ALLEGRO_COLOR(r, g, b, 1)
def al_map_rgba(r, g, b, a):
return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, a / 255.0)
def al_map_rgb(r, g, b):
return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, 1)
""")
f.write("""
def al_main(real_main, *args):
def python_callback(argc, argv):
real_main(*args)
return 0
cb = CFUNCTYPE(c_int, c_int, c_void_p)(python_callback)
al_run_main(0, 0, cb);
""")
f.close()
main()
| {
"content_hash": "abcc385439b743439080faa603beabef",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 78,
"avg_line_length": 32.113382899628256,
"alnum_prop": 0.44897841060369276,
"repo_name": "tsteinholz/SR-Gaming",
"id": "96ad28c2dd9ae6ef03f679cdacad71d486f77efe",
"size": "17299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Libraries/allegro5-5.1/python/generate_python_ctypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1656"
},
{
"name": "C",
"bytes": "12915196"
},
{
"name": "C++",
"bytes": "2140602"
},
{
"name": "CMake",
"bytes": "176968"
},
{
"name": "CSS",
"bytes": "4020"
},
{
"name": "GLSL",
"bytes": "3622"
},
{
"name": "HLSL",
"bytes": "6981"
},
{
"name": "HTML",
"bytes": "125"
},
{
"name": "Java",
"bytes": "84112"
},
{
"name": "JavaScript",
"bytes": "35631"
},
{
"name": "Makefile",
"bytes": "1563"
},
{
"name": "Mathematica",
"bytes": "16978"
},
{
"name": "Objective-C",
"bytes": "539745"
},
{
"name": "Perl",
"bytes": "1824"
},
{
"name": "Prolog",
"bytes": "102"
},
{
"name": "Python",
"bytes": "118643"
},
{
"name": "Shell",
"bytes": "39437"
},
{
"name": "TeX",
"bytes": "5585"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('indicators', '0020_auto_20190128_1307'),
]
operations = [
migrations.AlterField(
model_name='collecteddata',
name='achieved',
field=models.DecimalField(decimal_places=4, default=Decimal('0.00'), help_text=b'Actual or total for this record', max_digits=20, verbose_name=b'Achieved'),
),
migrations.AlterField(
model_name='historicalcollecteddata',
name='achieved',
field=models.DecimalField(decimal_places=4, default=Decimal('0.00'), help_text=b'Actual or total for this record', max_digits=20, verbose_name=b'Achieved'),
),
migrations.AlterField(
model_name='historicalindicator',
name='actuals',
field=models.DecimalField(blank=True, decimal_places=4, help_text=b'Sum of collected datas achieved', max_digits=20, null=True),
),
migrations.AlterField(
model_name='historicalindicator',
name='lop_target',
field=models.DecimalField(blank=True, decimal_places=4, default=Decimal('0.00'), help_text=b'Life of Program or Project goal for actual', max_digits=20, verbose_name=b'LOP Target'),
),
migrations.AlterField(
model_name='indicator',
name='actuals',
field=models.DecimalField(blank=True, decimal_places=4, help_text=b'Sum of collected datas achieved', max_digits=20, null=True),
),
migrations.AlterField(
model_name='indicator',
name='lop_target',
field=models.DecimalField(blank=True, decimal_places=4, default=Decimal('0.00'), help_text=b'Life of Program or Project goal for actual', max_digits=20, verbose_name=b'LOP Target'),
),
migrations.AlterField(
model_name='periodictarget',
name='target',
field=models.DecimalField(decimal_places=4, default=Decimal('0.00'), max_digits=20),
),
]
| {
"content_hash": "70c4ea20b2741e814ca0cffd22ebf226",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 193,
"avg_line_length": 43.93877551020408,
"alnum_prop": 0.6270320483046912,
"repo_name": "toladata/TolaActivity",
"id": "b4bf32ad12321890846cf879dbb2f4fcb962e4f2",
"size": "2227",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indicators/migrations/0021_auto_20190220_0254.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "497127"
},
{
"name": "JavaScript",
"bytes": "114367"
},
{
"name": "Python",
"bytes": "786590"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
} |
from openvino.inference_engine import IENetwork, IEPlugin
import argparse
import numpy as np
from urllib.parse import urlparse
from google.cloud import storage
from google.auth import exceptions
import classes
import datetime
from shutil import copy
import os
import json
def get_local_file(source_path):
parsed_path = urlparse(source_path)
if parsed_path.scheme == "gs":
bucket_name = parsed_path.netloc
file_path = parsed_path.path[1:]
file_name = os.path.split(parsed_path.path)[1]
try:
gs_client = storage.Client()
bucket = gs_client.get_bucket(bucket_name)
except exceptions.DefaultCredentialsError:
# if credentials fails, try to connect as anonymous user
gs_client = storage.Client.create_anonymous_client()
bucket = gs_client.bucket(bucket_name, user_project=None)
blob = bucket.blob(file_path)
blob.download_to_filename(file_name)
elif parsed_path.scheme == "":
# in case of local path just pass the input argument
if os.path.isfile(source_path):
file_name = source_path
else:
print("file " + source_path + "is not accessible")
file_name = ""
return file_name
def upload_file(source_file, target_folder):
parsed_path = urlparse(target_folder)
if parsed_path.scheme == "gs":
bucket_name = parsed_path.netloc
folder_path = parsed_path.path[1:]
try:
gs_client = storage.Client()
bucket = gs_client.get_bucket(bucket_name)
blob = bucket.blob(folder_path + "/" + source_file)
blob.upload_from_filename(source_file)
except Exception as er:
print(er)
return False
elif parsed_path.scheme == "":
if target_folder != ".":
copy(source_file, target_folder)
return True
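# Illustrative behaviour of the two helpers above (bucket and file names are
# made up for the example):
#   get_local_file("gs://my-bucket/model.xml")   -> downloads "model.xml" into
#                                                   the working dir, returns it
#   get_local_file("/tmp/model.xml")             -> returns the path unchanged
#   upload_file("out.npy", "gs://my-bucket/dir") -> uploads the file as dir/out.npy
#   upload_file("out.npy", ".")                  -> leaves the local file in place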
def main():
parser = argparse.ArgumentParser(
description='Component executing inference operation')
parser.add_argument('--model_bin', type=str, required=True,
help='GCS or local path to model weights file (.bin)')
parser.add_argument('--model_xml', type=str, required=True,
help='GCS or local path to model graph (.xml)')
parser.add_argument('--input_numpy_file', type=str, required=True,
help='GCS or local path to input dataset numpy file')
parser.add_argument('--label_numpy_file', type=str, required=True,
help='GCS or local path to numpy file with labels')
parser.add_argument('--output_folder', type=str, required=True,
help='GCS or local path to results upload folder')
parser.add_argument('--batch_size', type=int, default=1,
help='batch size to be used for inference')
    parser.add_argument('--scale_div', type=float, default=1,
                        help='scale the np input by dividing it by this value')
    parser.add_argument('--scale_sub', type=float, default=128,
                        help='scale the np input by subtracting this value')
args = parser.parse_args()
print(args)
device = "CPU"
plugin_dir = None
model_xml = get_local_file(args.model_xml)
print("model xml", model_xml)
if model_xml == "":
exit(1)
model_bin = get_local_file(args.model_bin)
print("model bin", model_bin)
if model_bin == "":
exit(1)
input_numpy_file = get_local_file(args.input_numpy_file)
print("input_numpy_file", input_numpy_file)
if input_numpy_file == "":
exit(1)
label_numpy_file = get_local_file(args.label_numpy_file)
print("label_numpy_file", label_numpy_file)
if label_numpy_file == "":
exit(1)
cpu_extension = "/usr/local/lib/libcpu_extension.so"
plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
if cpu_extension and 'CPU' in device:
plugin.add_cpu_extension(cpu_extension)
print("inference engine:", model_xml, model_bin, device)
# Read IR
print("Reading IR...")
net = IENetwork(model=model_xml, weights=model_bin)
batch_size = args.batch_size
net.batch_size = batch_size
print("Model loaded. Batch size", batch_size)
input_blob = next(iter(net.inputs))
output_blob = next(iter(net.outputs))
print(output_blob)
print("Loading IR to the plugin...")
exec_net = plugin.load(network=net, num_requests=1)
print("Loading input numpy")
imgs = np.load(input_numpy_file, mmap_mode='r', allow_pickle=False)
    imgs = (imgs / args.scale_div) - args.scale_sub
lbs = np.load(label_numpy_file, mmap_mode='r', allow_pickle=False)
print("Loaded input data", imgs.shape, imgs.dtype, "Min value:", np.min(imgs), "Max value", np.max(imgs))
combined_results = {} # dictionary storing results for all model outputs
processing_times = np.zeros((0),int)
matched_count = 0
total_executed = 0
for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
img = imgs[x:(x + batch_size)]
lb = lbs[x:(x + batch_size)]
start_time = datetime.datetime.now()
results = exec_net.infer(inputs={input_blob: img})
end_time = datetime.datetime.now()
duration = (end_time - start_time).total_seconds() * 1000
print("Inference duration:", duration, "ms")
processing_times = np.append(processing_times,np.array([int(duration)]))
output = list(results.keys())[0] # check only one output
nu = results[output]
for i in range(nu.shape[0]):
single_result = nu[[i],...]
ma = np.argmax(single_result)
total_executed += 1
if ma == lb[i]:
matched_count += 1
mark_message = "; Correct match."
else:
mark_message = "; Incorrect match. Should be {} {}".format(lb[i], classes.imagenet_classes[lb[i]] )
print("\t",i, classes.imagenet_classes[ma],ma, mark_message)
if output in combined_results:
combined_results[output] = np.append(combined_results[output],
results[output], 0)
else:
combined_results[output] = results[output]
filename = output.replace("/", "_") + ".npy"
np.save(filename, combined_results[output])
upload_file(filename, args.output_folder)
print("Inference results uploaded to", filename)
print('Classification accuracy: {:.2f}'.format(100*matched_count/total_executed))
print('Average time: {:.2f} ms; average speed: {:.2f} fps'.format(round(np.average(processing_times), 2),round(1000 * batch_size / np.average(processing_times), 2)))
accuracy = matched_count/total_executed
latency = np.average(processing_times)
metrics = {'metrics': [{'name': 'accuracy-score','numberValue': accuracy,'format': "PERCENTAGE"},
{'name': 'latency','numberValue': latency,'format': "RAW"}]}
with open('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
if __name__ == "__main__":
main()
| {
"content_hash": "9945868ff695e547bf9dcb7cf3337f00",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 169,
"avg_line_length": 40.40677966101695,
"alnum_prop": 0.610738255033557,
"repo_name": "kubeflow/pipelines",
"id": "1ea24c2d391fd227ee63e8b10c71b2e740105e6c",
"size": "7152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/components/openvino/predict/containers/predict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
import copy
from nova.api.validation import parameter_types
snapshots_create = {
'type': 'object',
'properties': {
'snapshot': {
'type': 'object',
'properties': {
'volume_id': {
'type': 'string', 'minLength': 1,
},
'create_info': {
'type': 'object',
'properties': {
'snapshot_id': {
'type': 'string', 'minLength': 1,
},
'type': {
'type': 'string', 'enum': ['qcow2'],
},
'new_file': {
'type': 'string', 'minLength': 1,
},
'id': {
'type': 'string', 'minLength': 1,
},
},
'required': ['snapshot_id', 'type', 'new_file'],
'additionalProperties': False,
},
},
'required': ['volume_id', 'create_info'],
'additionalProperties': False,
}
},
'required': ['snapshot'],
'additionalProperties': False,
}
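# Illustrative request body accepted by the ``snapshots_create`` schema above
# (the UUIDs and file name are invented for the example):
#
#   {
#       "snapshot": {
#           "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
#           "create_info": {
#               "snapshot_id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c",
#               "type": "qcow2",
#               "new_file": "new_file_name"
#           }
#       }
#   }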
delete_query = {
'type': 'object',
'properties': {
'delete_info': parameter_types.multi_params({'type': 'string'})
},
    # NOTE(gmann): This is kept True to preserve backward compatibility.
    # As of now, schema validation strips out the additional parameters and
    # does not raise a 400. In microversion 2.75 we block additional
    # parameters.
'additionalProperties': True
}
delete_query_275 = copy.deepcopy(delete_query)
delete_query_275['additionalProperties'] = False
| {
"content_hash": "f0bdc4a38c9a80241d239cec440794d8",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 32.6,
"alnum_prop": 0.4316787506971556,
"repo_name": "openstack/nova",
"id": "0a13c50b11414877d468c1bd7ec878fe536ae06a",
"size": "2424",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/schemas/assisted_volume_snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
} |
import os
from golem.report import report
from golem.core.project import Project
class TestGetLastExecutionTimestamps:
def test_get_last_execution_timestamps(self, project_function, test_utils):
_, project = project_function.activate()
# suite does not exist
last_exec = report.get_last_execution_timestamps([project], 'suite_does_not_exist')
assert last_exec[project] == {}
# suite with no executions
suite_name = 'suite1'
test_utils.create_test(project, name='test1')
test_utils.create_suite(project, name=suite_name, tests=['test1'])
        last_exec = report.get_last_execution_timestamps([project], suite_name)
        assert last_exec[project] == {}
# suite with one execution
timestamp = test_utils.run_suite(project, suite_name)
last_exec = report.get_last_execution_timestamps([project], suite_name)
assert last_exec[project] == {suite_name: [timestamp]}
# multiple executions
timestamp_first = test_utils.run_suite(project, suite_name)
timestamp_second = test_utils.run_suite(project, suite_name)
last_exec = report.get_last_execution_timestamps([project], suite_name, limit=2)
assert len(last_exec[project][suite_name]) == 2
assert last_exec[project][suite_name][0] == timestamp_second
assert last_exec[project][suite_name][1] == timestamp_first
class TestDeleteExecution:
def test_delete_execution(self, project_class, test_utils):
_, project = project_class.activate()
execution = test_utils.execute_random_suite(project)
execpath = os.path.join(Project(project).report_directory_path, execution['suite_name'])
assert os.path.isdir(execpath)
assert os.path.isdir(execution['exec_dir'])
errors = report.delete_execution(project, execution['suite_name'])
assert errors == []
assert not os.path.isdir(execpath)
class TestDeleteExecutionTimestamp:
def test_delete_execution_timestamp(self, project_class, test_utils):
_, project = project_class.activate()
execution = test_utils.execute_random_suite(project)
execpath = os.path.join(Project(project).report_directory_path, execution['suite_name'])
assert os.path.isdir(execution['exec_dir'])
errors = report.delete_execution_timestamp(project, execution['suite_name'], execution['timestamp'])
assert errors == []
assert not os.path.isdir(execution['exec_dir'])
assert os.path.isdir(execpath) # folder for execution name still exists
def test_delete_execution_timestamp_does_not_exist(self, project_class, test_utils):
_, project = project_class.activate()
execution = test_utils.random_string()
timestamp = test_utils.random_string()
errors = report.delete_execution_timestamp(project, execution, timestamp)
assert errors == [f'Execution for {project} {execution} {timestamp} does not exist']
| {
"content_hash": "71c699e9ad3f96b7d90ae602a3a5b1d2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 108,
"avg_line_length": 41.267605633802816,
"alnum_prop": 0.6757679180887372,
"repo_name": "lucianopuccio/golem",
"id": "4756e0f31270be98d35e69470908702375972fa5",
"size": "2930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/report/report_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19253"
},
{
"name": "HTML",
"bytes": "72727"
},
{
"name": "JavaScript",
"bytes": "119483"
},
{
"name": "Python",
"bytes": "520538"
}
],
"symlink_target": ""
} |
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map
class CanvasMapper(object):
"""A coordinate mapper that maps to the viewer's canvas in
canvas coordinates.
"""
def __init__(self, viewer):
# record the viewer just in case
self.viewer = viewer
def to_canvas(self, canvas_x, canvas_y):
return (canvas_x, canvas_y)
def to_data(self, canvas_x, canvas_y):
return self.viewer.get_data_xy(canvas_x, canvas_y)
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class DataMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates.
"""
def __init__(self, viewer):
self.viewer = viewer
def to_canvas(self, data_x, data_y):
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, data_x, data_y):
return data_x, data_y
def data_to(self, data_x, data_y):
return data_x, data_y
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)
class OffsetMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates that are offsets relative to some other
reference object.
"""
def __init__(self, viewer, refobj):
# TODO: provide a keyword arg to specify which point in the obj
self.viewer = viewer
self.refobj = refobj
def calc_offsets(self, points):
ref_x, ref_y = self.refobj.get_reference_pt()
#return map(lambda x, y: x - ref_x, y - ref_y, points)
def _cvt(pt):
x, y = pt
return x - ref_x, y - ref_y
return map(_cvt, points)
def to_canvas(self, delta_x, delta_y):
data_x, data_y = self.to_data(delta_x, delta_y)
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, delta_x, delta_y):
ref_x, ref_y = self.refobj.get_reference_pt()
data_x, data_y = self.refobj.crdmap.to_data(ref_x, ref_y)
return data_x + delta_x, data_y + delta_y
## def data_to(self, data_x, data_y):
## ref_x, ref_y = self.refobj.get_reference_pt()
## return data_x - ref_data_x, data_y - ref_data_y
def offset_pt(self, pt, xoff, yoff):
# A no-op because this object's points are always considered
# relative to the reference object
return pt
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class WCSMapper(DataMapper):
"""A coordinate mapper that maps to the viewer's canvas
in WCS coordinates.
"""
def to_canvas(self, lon, lat):
data_x, data_y = self.to_data(lon, lat)
return super(WCSMapper, self).to_canvas(data_x, data_y)
def to_data(self, lon, lat):
image = self.viewer.get_image()
data_x, data_y = image.radectopix(lon, lat)
return data_x, data_y
def data_to(self, data_x, data_y):
image = self.viewer.get_image()
lon, lat = image.pixtoradec(data_x, data_y)
return lon, lat
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return wcs.add_offset_radec(x, y, xoff, yoff)
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
        # TODO: optimize by rotating in WCS space
x, y = self.to_data(x, y)
xoff, yoff = self.to_data(xoff, yoff)
x, y = super(WCSMapper, self).rotate_pt(x, y, theta,
xoff=xoff, yoff=yoff)
x, y = self.data_to(x, y)
return x, y
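

# Minimal usage sketch (not part of ginga): the stub viewer below provides only
# the single method DataMapper needs here, purely to illustrate the mapping
# calls; real code would pass an actual ginga viewer instance instead.
if __name__ == '__main__':
    class _StubViewer(object):
        def canvascoords(self, data_x, data_y):
            # identity transform, for illustration only
            return data_x, data_y

    dmap = DataMapper(_StubViewer())
    print(dmap.to_canvas(10.0, 20.0))          # -> (10.0, 20.0)
    print(dmap.offset_pt((10.0, 20.0), 1, 2))  # -> (11.0, 22.0)
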
#END
| {
"content_hash": "9dbf027110c3fed2b348a0f0e93f8a0c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 71,
"avg_line_length": 31.24,
"alnum_prop": 0.5871959026888605,
"repo_name": "rajul/ginga",
"id": "56cbfa41504c336a0ce9a6d60530d0a73a88a4fd",
"size": "4144",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ginga/canvas/coordmap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "85309"
},
{
"name": "Jupyter Notebook",
"bytes": "1589549"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "2795955"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import datetime
import logging
import re
from requests.exceptions import TooManyRedirects
from sqlalchemy import Column, Unicode, DateTime
from flexget import plugin, db_schema
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import Session as RequestSession, TimedLimiter, RequestException
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize
log = logging.getLogger('alpharatio')
Base = db_schema.versioned_base('alpharatio', 0)
requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('alpharatio.cc', '5 seconds'))
# ElementZero confirmed with AlphaRatio sysop 'jasonmaster' that they do want a 5 second limiter
CATEGORIES = {
'tvsd': 'filter_cat[1]',
'tvhd': 'filter_cat[2]',
'tvdvdrip': 'filter_cat[3]',
'tvpacksd': 'filter_cat[4]',
'tvpackhd': 'filter_cat[5]',
'moviesd': 'filter_cat[6]',
'moviehd': 'filter_cat[7]',
'moviepacksd': 'filter_cat[8]',
'moviepackhd': 'filter_cat[9]',
'moviexxx': 'filter_cat[10]',
'mvid': 'filter_cat[11]',
'gamespc': 'filter_cat[12]',
'gamesxbox': 'filter_cat[13]',
'gamesps3': 'filter_cat[14]',
'gameswii': 'filter_cat[15]',
'appspc': 'filter_cat[16]',
'appsmac': 'filter_cat[17]',
'appslinux': 'filter_cat[18]',
'appsmobile': 'filter_cat[19]',
'0dayXXX': 'filter_cat[20]',
'ebook': 'filter_cat[21]',
'audiobook': 'filter_cat[22]',
'music': 'filter_cat[23]',
'misc': 'filter_cat[24]'
}
LEECHSTATUS = {
'normal': 0,
'freeleech': 1,
'neutral leech': 2,
'either': 3
}
class AlphaRatioCookie(Base):
__tablename__ = 'alpharatio_cookie'
username = Column(Unicode, primary_key=True)
_cookie = Column('cookie', Unicode)
cookie = json_synonym('_cookie')
expires = Column(DateTime)
class SearchAlphaRatio(object):
"""
AlphaRatio search plugin.
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'category': one_or_more({'type': 'string', 'enum': list(CATEGORIES.keys())}, unique_items=True),
'order_by': {'type': 'string', 'enum': ['seeders', 'leechers', 'time', 'size', 'year', 'snatched'],
'default': 'time'},
'order_desc': {'type': 'boolean', 'default': True},
'scene': {'type': 'boolean'},
'leechstatus': {'type': 'string', 'enum': list(LEECHSTATUS.keys()), 'default': 'normal'},
},
'required': ['username', 'password'],
'additionalProperties': False
}
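    # Illustrative task configuration consuming this search plugin through the
    # ``discover`` plugin (all values below are made up):
    #
    #   discover:
    #     what:
    #       - movie_list: watchlist
    #     from:
    #       - alpharatio:
    #           username: someuser
    #           password: somepass
    #           category: [moviehd, moviepackhd]
    #           order_by: seeders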
base_url = 'https://alpharatio.cc/'
errors = False
def get(self, url, params, username, password, force=False):
"""
Wrapper to allow refreshing the cookie if it is invalid for some reason
:param unicode url:
:param dict params:
:param str username:
:param str password:
:param bool force: flag used to refresh the cookie forcefully ie. forgo DB lookup
:return:
"""
cookies = self.get_login_cookie(username, password, force=force)
invalid_cookie = False
try:
response = requests.get(url, params=params, cookies=cookies)
if self.base_url + 'login.php' in response.url:
invalid_cookie = True
except TooManyRedirects:
# Apparently it endlessly redirects if the cookie is invalid?
            log.debug('AlphaRatio request failed: Too many redirects. Invalid cookie?')
invalid_cookie = True
if invalid_cookie:
if self.errors:
raise plugin.PluginError('AlphaRatio login cookie is invalid. Login page received?')
self.errors = True
# try again
response = self.get(url, params, username, password, force=True)
else:
self.errors = False
return response
def get_login_cookie(self, username, password, force=False):
"""
Retrieves login cookie
:param str username:
:param str password:
:param bool force: if True, then retrieve a fresh cookie instead of looking in the DB
:return:
"""
if not force:
with Session() as session:
saved_cookie = session.query(AlphaRatioCookie).filter(AlphaRatioCookie.username == username).first()
if saved_cookie and saved_cookie.expires and saved_cookie.expires >= datetime.datetime.now():
log.debug('Found valid login cookie')
return saved_cookie.cookie
url = self.base_url + 'login.php'
try:
log.debug('Attempting to retrieve AlphaRatio cookie')
response = requests.post(url, data={'username': username, 'password': password, 'login': 'Log in',
'keeplogged': '1'}, timeout=30)
except RequestException as e:
raise plugin.PluginError('AlphaRatio login failed: %s' % e)
if 'Your username or password was incorrect.' in response.text:
raise plugin.PluginError('AlphaRatio login failed: Your username or password was incorrect.')
with Session() as session:
expires = None
for c in requests.cookies:
if c.name == 'session':
expires = c.expires
if expires:
expires = datetime.datetime.fromtimestamp(expires)
log.debug('Saving or updating AlphaRatio cookie in db')
cookie = AlphaRatioCookie(username=username, cookie=dict(requests.cookies), expires=expires)
session.merge(cookie)
return cookie.cookie
def find_index(self, soup, text):
"""Finds the index of the tag containing the text"""
for i in range(0, len(soup)):
img = soup[i].find('img')
if soup[i].text.strip() == '' and img and text.lower() in img.get('title').lower():
return i
elif text.lower() in soup[i].text.lower():
return i
raise plugin.PluginError('AlphaRatio layout has changed, unable to parse correctly. Please open a Github issue')
@plugin.internet(log)
def search(self, task, entry, config):
"""
Search for entries on AlphaRatio
"""
params = {}
if 'category' in config:
categories = config['category'] if isinstance(config['category'], list) else [config['category']]
for category in categories:
params[CATEGORIES[category]] = 1
if 'scene' in config:
params['scene'] = int(config['scene'])
ordering = 'desc' if config['order_desc'] else 'asc'
entries = set()
params.update({'order_by': config['order_by'], 'search_submit': 1, 'action': 'basic', 'order_way': ordering,
'freeleech': LEECHSTATUS[config['leechstatus']]})
for search_string in entry.get('search_strings', [entry['title']]):
params['searchstr'] = search_string
log.debug('Using search params: %s', params)
try:
page = self.get(self.base_url + 'torrents.php', params, config['username'], config['password'])
log.debug('requesting: %s', page.url)
except RequestException as e:
log.error('AlphaRatio request failed: %s', e)
continue
soup = get_soup(page.content)
# extract the column indices
header_soup = soup.find('tr', attrs={'class': 'colhead'})
if not header_soup:
log.debug('no search results found for \'%s\'', search_string)
continue
header_soup = header_soup.findAll('td')
size_idx = self.find_index(header_soup, 'size')
snatches_idx = self.find_index(header_soup, 'snatches')
seeds_idx = self.find_index(header_soup, 'seeders')
leeches_idx = self.find_index(header_soup, 'leechers')
for result in soup.findAll('tr', attrs={'class': 'torrent'}):
group_info = result.find('td', attrs={'class': 'big_info'}).find('div', attrs={'class': 'group_info'})
title = group_info.find('a', href=re.compile('torrents.php\?id=\d+')).text
url = self.base_url + \
group_info.find('a', href=re.compile('torrents.php\?action=download(?!usetoken)'))['href']
torrent_info = result.findAll('td')
size_col = torrent_info[size_idx].text
log.debug('AlphaRatio size: %s', size_col)
size = re.search('(\d+(?:[.,]\d+)*)\s?([KMGTP]B)', size_col)
torrent_tags = ', '.join([tag.text for tag in group_info.findAll('div', attrs={'class': 'tags'})])
e = Entry()
e['title'] = title
e['url'] = url
e['torrent_tags'] = torrent_tags
if not size:
log.error('No size found! Please create a Github issue. Size received: %s', size_col)
else:
e['content_size'] = parse_filesize(size.group(0))
e['torrent_snatches'] = int(torrent_info[snatches_idx].text)
e['torrent_seeds'] = int(torrent_info[seeds_idx].text)
e['torrent_leeches'] = int(torrent_info[leeches_idx].text)
entries.add(e)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchAlphaRatio, 'alpharatio', interfaces=['search'], api_ver=2)
| {
"content_hash": "d40c2816745db4b5d6b19762c82c7de9",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 120,
"avg_line_length": 38.57198443579767,
"alnum_prop": 0.5790376273580147,
"repo_name": "OmgOhnoes/Flexget",
"id": "984decc0b6c346bfd54a383c21228d2521f59ced",
"size": "9913",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "flexget/plugins/sites/alpharatio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3324701"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
import os
from .. import bot
from .. import db
from .test_bot import EXAMPLE_TWEET
TESTDB = 'test_goldstar.db'
class TestDB():
    def setup(self):
        # Start from a clean slate; the db file may not exist on the first run
        if os.path.exists(TESTDB):
            os.remove(TESTDB)
        self.db = db.GoldStarDB(TESTDB)
def teardown(self):
os.remove(TESTDB)
def test_db_save(self):
# Save a tweet to the test db and check if it got inserted
handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
handler.handle()
for recipient in handler.get_recipients():
assert self.db.count_stars(recipient['id']) == 1
assert self.db.count_stars(123) == 0 # Random user_id
    def test_db_save_and_delete(self):
        # Save a tweet to the test db, check insertion, then delete the stars
handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
handler.handle()
# Have stars been added?
for recipient in handler.get_recipients():
assert self.db.count_stars(recipient['id']) == 1
# Does a random user_id have any stars?
assert self.db.count_stars(123) == 0
# Can we successfully delete a star?
for recipient in handler.get_recipients():
self.db.delete_star(status_id=EXAMPLE_TWEET['id'],
recipient_id=recipient['id'])
assert self.db.count_stars(recipient['id']) == 0
| {
"content_hash": "8b2edd9f3971a300060598bc0e630533",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 33.31707317073171,
"alnum_prop": 0.6112737920937042,
"repo_name": "barentsen/AstroGoldStars",
"id": "985a4bd665aac429039a8184a7285986057b38c2",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/tests/test_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3897"
},
{
"name": "HTML",
"bytes": "5278"
},
{
"name": "Python",
"bytes": "12426"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import SlimFC, AppendBiasLayer, \
normc_initializer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
class FullyConnectedNetwork(TorchModelV2, nn.Module):
"""Generic fully connected network."""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
activation = model_config.get("fcnet_activation")
hiddens = model_config.get("fcnet_hiddens")
no_final_linear = model_config.get("no_final_linear")
self.vf_share_layers = model_config.get("vf_share_layers")
self.free_log_std = model_config.get("free_log_std")
# Generate free-floating bias variables for the second half of
# the outputs.
if self.free_log_std:
assert num_outputs % 2 == 0, (
"num_outputs must be divisible by two", num_outputs)
num_outputs = num_outputs // 2
layers = []
prev_layer_size = int(np.product(obs_space.shape))
self._logits = None
# Create layers 0 to second-last.
for size in hiddens[:-1]:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=size,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = size
# The last layer is adjusted to be of size num_outputs, but it's a
# layer with activation.
if no_final_linear and num_outputs:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = num_outputs
# Finish the layers with the provided sizes (`hiddens`), plus -
# iff num_outputs > 0 - a last linear layer of size num_outputs.
else:
if len(hiddens) > 0:
layers.append(
SlimFC(
in_size=prev_layer_size,
out_size=hiddens[-1],
initializer=normc_initializer(1.0),
activation_fn=activation))
prev_layer_size = hiddens[-1]
if num_outputs:
self._logits = SlimFC(
in_size=prev_layer_size,
out_size=num_outputs,
initializer=normc_initializer(0.01),
activation_fn=None)
else:
self.num_outputs = (
[int(np.product(obs_space.shape))] + hiddens[-1:])[-1]
# Layer to add the log std vars to the state-dependent means.
if self.free_log_std and self._logits:
self._append_free_log_std = AppendBiasLayer(num_outputs)
self._hidden_layers = nn.Sequential(*layers)
self._value_branch_separate = None
if not self.vf_share_layers:
# Build a parallel set of hidden layers for the value net.
prev_vf_layer_size = int(np.product(obs_space.shape))
vf_layers = []
for size in hiddens:
vf_layers.append(
SlimFC(
in_size=prev_vf_layer_size,
out_size=size,
activation_fn=activation,
initializer=normc_initializer(1.0)))
prev_vf_layer_size = size
self._value_branch_separate = nn.Sequential(*vf_layers)
self._value_branch = SlimFC(
in_size=prev_layer_size,
out_size=1,
initializer=normc_initializer(1.0),
activation_fn=None)
# Holds the current "base" output (before logits layer).
self._features = None
# Holds the last input, in case value branch is separate.
self._last_flat_in = None
@override(TorchModelV2)
def forward(self, input_dict, state, seq_lens):
obs = input_dict["obs_flat"].float()
self._last_flat_in = obs.reshape(obs.shape[0], -1)
self._features = self._hidden_layers(self._last_flat_in)
logits = self._logits(self._features) if self._logits else \
self._features
if self.free_log_std:
logits = self._append_free_log_std(logits)
return logits, state
@override(TorchModelV2)
def value_function(self):
assert self._features is not None, "must call forward() first"
if self._value_branch_separate:
return self._value_branch(
self._value_branch_separate(self._last_flat_in)).squeeze(1)
else:
return self._value_branch(self._features).squeeze(1)
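

# Minimal standalone sketch (not part of RLlib): instantiate the model with a
# toy gym observation space and run one forward pass. The config values below
# mirror common model defaults but are assumptions chosen for illustration.
if __name__ == "__main__":
    import gym

    model = FullyConnectedNetwork(
        obs_space=gym.spaces.Box(-1.0, 1.0, (4,)),
        action_space=gym.spaces.Discrete(2),
        num_outputs=2,
        model_config={
            "fcnet_activation": "relu",
            "fcnet_hiddens": [64, 64],
            "no_final_linear": False,
            "vf_share_layers": True,
            "free_log_std": False,
        },
        name="toy_fcnet",
    )
    logits, _ = model.forward({"obs_flat": torch.zeros((1, 4))}, [], None)
    print(logits.shape, model.value_function().shape)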
| {
"content_hash": "2fbaad9db35daa3bdcd0f2c85ebe3be2",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 75,
"avg_line_length": 39.404580152671755,
"alnum_prop": 0.5559860519178613,
"repo_name": "robertnishihara/ray",
"id": "4080c93185a2161183b84fda41dfde5d723d1f1c",
"size": "5162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/models/torch/fcnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
} |
"""Support for SimpliSafe alarm systems."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Iterable
from datetime import timedelta
from typing import Any, cast
from simplipy import API
from simplipy.device import Device, DeviceTypes
from simplipy.errors import (
EndpointUnavailableError,
InvalidCredentialsError,
SimplipyError,
WebsocketError,
)
from simplipy.system import SystemNotification
from simplipy.system.v3 import (
MAX_ALARM_DURATION,
MAX_ENTRY_DELAY_AWAY,
MAX_ENTRY_DELAY_HOME,
MAX_EXIT_DELAY_AWAY,
MAX_EXIT_DELAY_HOME,
MIN_ALARM_DURATION,
MIN_ENTRY_DELAY_AWAY,
MIN_EXIT_DELAY_AWAY,
SystemV3,
Volume,
)
from simplipy.websocket import (
EVENT_AUTOMATIC_TEST,
EVENT_CAMERA_MOTION_DETECTED,
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_DEVICE_TEST,
EVENT_DOORBELL_DETECTED,
EVENT_LOCK_LOCKED,
EVENT_LOCK_UNLOCKED,
EVENT_POWER_OUTAGE,
EVENT_POWER_RESTORED,
EVENT_SECRET_ALERT_TRIGGERED,
EVENT_SENSOR_PAIRED_AND_NAMED,
EVENT_USER_INITIATED_TEST,
WebsocketEvent,
)
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import (
ATTR_CODE,
ATTR_DEVICE_ID,
CONF_CODE,
CONF_TOKEN,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import CoreState, Event, HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import (
ConfigEntryAuthFailed,
ConfigEntryNotReady,
HomeAssistantError,
)
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.issue_registry import IssueSeverity, async_create_issue
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DOMAIN,
LOGGER,
)
from .typing import SystemType
ATTR_CATEGORY = "category"
ATTR_LAST_EVENT_CHANGED_BY = "last_event_changed_by"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_SERIAL = "last_event_sensor_serial"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
DEFAULT_CONFIG_URL = "https://webapp.simplisafe.com/new/#/dashboard"
DEFAULT_ENTITY_MODEL = "Alarm control panel"
DEFAULT_ERROR_THRESHOLD = 2
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
DEFAULT_SOCKET_MIN_RETRY = 15
DISPATCHER_TOPIC_WEBSOCKET_EVENT = "simplisafe_websocket_event_{0}"
EVENT_SIMPLISAFE_EVENT = "SIMPLISAFE_EVENT"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
PLATFORMS = [
Platform.ALARM_CONTROL_PANEL,
Platform.BINARY_SENSOR,
Platform.BUTTON,
Platform.LOCK,
Platform.SENSOR,
]
VOLUME_MAP = {
"high": Volume.HIGH,
"low": Volume.LOW,
"medium": Volume.MEDIUM,
"off": Volume.OFF,
}
SERVICE_NAME_CLEAR_NOTIFICATIONS = "clear_notifications"
SERVICE_NAME_REMOVE_PIN = "remove_pin"
SERVICE_NAME_SET_PIN = "set_pin"
SERVICE_NAME_SET_SYSTEM_PROPERTIES = "set_system_properties"
SERVICES = (
SERVICE_NAME_CLEAR_NOTIFICATIONS,
SERVICE_NAME_REMOVE_PIN,
SERVICE_NAME_SET_PIN,
SERVICE_NAME_SET_SYSTEM_PROPERTIES,
)
SERVICE_CLEAR_NOTIFICATIONS_SCHEMA = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
},
)
SERVICE_REMOVE_PIN_SCHEMA = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string,
}
)
SERVICE_SET_PIN_SCHEMA = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_PIN_LABEL): cv.string,
vol.Required(ATTR_PIN_VALUE): cv.string,
},
)
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Optional(ATTR_ALARM_DURATION): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=MIN_ALARM_DURATION, max=MAX_ALARM_DURATION),
),
vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.In(VOLUME_MAP), VOLUME_MAP.get),
vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.In(VOLUME_MAP), VOLUME_MAP.get),
vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=MIN_ENTRY_DELAY_AWAY, max=MAX_ENTRY_DELAY_AWAY),
),
vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(max=MAX_ENTRY_DELAY_HOME),
),
vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=MIN_EXIT_DELAY_AWAY, max=MAX_EXIT_DELAY_AWAY),
),
vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(max=MAX_EXIT_DELAY_HOME),
),
vol.Optional(ATTR_LIGHT): cv.boolean,
vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
vol.In(VOLUME_MAP), VOLUME_MAP.get
),
}
)
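# Illustrative service call for ``set_system_properties`` (the device id is
# invented; duration fields accept HH:MM:SS strings via cv.time_period):
#
#   service: simplisafe.set_system_properties
#   data:
#     device_id: 1234567890abcdef1234567890abcdef
#     alarm_duration: "00:05:00"
#     alarm_volume: medium
#     light: true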
WEBSOCKET_EVENTS_REQUIRING_SERIAL = [EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED]
WEBSOCKET_EVENTS_TO_FIRE_HASS_EVENT = [
EVENT_AUTOMATIC_TEST,
EVENT_CAMERA_MOTION_DETECTED,
EVENT_DOORBELL_DETECTED,
EVENT_DEVICE_TEST,
EVENT_SECRET_ALERT_TRIGGERED,
EVENT_SENSOR_PAIRED_AND_NAMED,
EVENT_USER_INITIATED_TEST,
]
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
@callback
def _async_get_system_for_service_call(
hass: HomeAssistant, call: ServiceCall
) -> SystemType:
"""Get the SimpliSafe system related to a service call (by device ID)."""
device_id = call.data[ATTR_DEVICE_ID]
device_registry = dr.async_get(hass)
if (
alarm_control_panel_device_entry := device_registry.async_get(device_id)
) is None:
raise vol.Invalid("Invalid device ID specified")
assert alarm_control_panel_device_entry.via_device_id
if (
base_station_device_entry := device_registry.async_get(
alarm_control_panel_device_entry.via_device_id
)
) is None:
raise ValueError("No base station registered for alarm control panel")
[system_id] = [
identity[1]
for identity in base_station_device_entry.identifiers
if identity[0] == DOMAIN
]
for entry_id in base_station_device_entry.config_entries:
if (simplisafe := hass.data[DOMAIN].get(entry_id)) is None:
continue
return cast(SystemType, simplisafe.systems[system_id])
raise ValueError(f"No system for device ID: {device_id}")
@callback
def _async_log_deprecated_service_call(
hass: HomeAssistant,
call: ServiceCall,
alternate_service: str,
alternate_target: str,
breaks_in_ha_version: str,
) -> None:
"""Log a warning about a deprecated service call."""
deprecated_service = f"{call.domain}.{call.service}"
async_create_issue(
hass,
DOMAIN,
f"deprecated_service_{deprecated_service}",
breaks_in_ha_version=breaks_in_ha_version,
is_fixable=True,
is_persistent=True,
severity=IssueSeverity.WARNING,
translation_key="deprecated_service",
translation_placeholders={
"alternate_service": alternate_service,
"alternate_target": alternate_target,
"deprecated_service": deprecated_service,
},
)
LOGGER.warning(
(
'The "%s" service is deprecated and will be removed in %s; use the "%s" '
'service and pass it a target entity ID of "%s"'
),
deprecated_service,
breaks_in_ha_version,
alternate_service,
alternate_target,
)
@callback
def _async_register_base_station(
hass: HomeAssistant, entry: ConfigEntry, system: SystemType
) -> None:
"""Register a new bridge."""
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, system.system_id)},
manufacturer="SimpliSafe",
model=system.version,
name=system.address,
)
@callback
def _async_standardize_config_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Bring a config entry up to current standards."""
if CONF_TOKEN not in entry.data:
raise ConfigEntryAuthFailed(
"SimpliSafe OAuth standard requires re-authentication"
)
entry_updates = {}
if not entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = entry.data[CONF_USERNAME]
if CONF_CODE in entry.data:
# If an alarm code was provided as part of configuration.yaml, pop it out of
# the config entry's data and move it to options:
data = {**entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**entry.options,
CONF_CODE: data.pop(CONF_CODE),
}
if entry_updates:
hass.config_entries.async_update_entry(entry, **entry_updates)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up SimpliSafe as config entry."""
_async_standardize_config_entry(hass, entry)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
websession = aiohttp_client.async_get_clientsession(hass)
try:
api = await API.async_from_refresh_token(
entry.data[CONF_TOKEN], session=websession
)
except InvalidCredentialsError as err:
raise ConfigEntryAuthFailed from err
except SimplipyError as err:
LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
simplisafe = SimpliSafe(hass, entry, api)
try:
await simplisafe.async_init()
except SimplipyError as err:
raise ConfigEntryNotReady from err
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = simplisafe
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
@callback
def extract_system(func: Callable) -> Callable:
"""Define a decorator to get the correct system for a service call."""
async def wrapper(call: ServiceCall) -> None:
"""Wrap the service function."""
system = _async_get_system_for_service_call(hass, call)
try:
await func(call, system)
except SimplipyError as err:
raise HomeAssistantError(
f'Error while executing "{call.service}": {err}'
) from err
return wrapper
@_verify_domain_control
@extract_system
async def async_clear_notifications(call: ServiceCall, system: SystemType) -> None:
"""Clear all active notifications."""
_async_log_deprecated_service_call(
hass,
call,
"button.press",
"button.alarm_control_panel_clear_notifications",
"2022.12.0",
)
await system.async_clear_notifications()
@_verify_domain_control
@extract_system
async def async_remove_pin(call: ServiceCall, system: SystemType) -> None:
"""Remove a PIN."""
await system.async_remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
@_verify_domain_control
@extract_system
async def async_set_pin(call: ServiceCall, system: SystemType) -> None:
"""Set a PIN."""
await system.async_set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
@_verify_domain_control
@extract_system
async def async_set_system_properties(
call: ServiceCall, system: SystemType
) -> None:
"""Set one or more system parameters."""
if not isinstance(system, SystemV3):
raise HomeAssistantError("Can only set system properties on V3 systems")
await system.async_set_properties(
{prop: value for prop, value in call.data.items() if prop != ATTR_DEVICE_ID}
)
for service, method, schema in (
(
SERVICE_NAME_CLEAR_NOTIFICATIONS,
async_clear_notifications,
SERVICE_CLEAR_NOTIFICATIONS_SCHEMA,
),
(SERVICE_NAME_REMOVE_PIN, async_remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
(SERVICE_NAME_SET_PIN, async_set_pin, SERVICE_SET_PIN_SCHEMA),
(
SERVICE_NAME_SET_SYSTEM_PROPERTIES,
async_set_system_properties,
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
),
):
if hass.services.has_service(DOMAIN, service):
continue
async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
current_options = {**entry.options}
async def async_reload_entry(_: HomeAssistant, updated_entry: ConfigEntry) -> None:
"""Handle an options update.
This method will get called in two scenarios:
1. When SimpliSafeOptionsFlowHandler is initiated
2. When a new refresh token is saved to the config entry data
We only want #1 to trigger an actual reload.
"""
nonlocal current_options
updated_options = {**updated_entry.options}
if updated_options == current_options:
return
await hass.config_entries.async_reload(entry.entry_id)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a SimpliSafe config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
loaded_entries = [
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.state == ConfigEntryState.LOADED
]
if len(loaded_entries) == 1:
# If this is the last loaded instance of SimpliSafe, deregister any services
# defined during integration setup:
for service_name in SERVICES:
hass.services.async_remove(DOMAIN, service_name)
return unload_ok
class SimpliSafe:
"""Define a SimpliSafe data object."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry, api: API) -> None:
"""Initialize."""
self._api = api
self._hass = hass
self._system_notifications: dict[int, set[SystemNotification]] = {}
self._websocket_reconnect_task: asyncio.Task | None = None
self.entry = entry
self.initial_event_to_use: dict[int, dict[str, Any]] = {}
self.subscription_data: dict[int, Any] = api.subscription_data
self.systems: dict[int, SystemType] = {}
# This will get filled in by async_init:
self.coordinator: DataUpdateCoordinator | None = None
@callback
def _async_process_new_notifications(self, system: SystemType) -> None:
"""Act on any new system notifications."""
if self._hass.state != CoreState.running:
# If HASS isn't fully running yet, it may cause the SIMPLISAFE_NOTIFICATION
# event to fire before dependent components (like automation) are fully
# ready. If that's the case, skip:
return
latest_notifications = set(system.notifications)
to_add = latest_notifications.difference(
self._system_notifications[system.system_id]
)
if not to_add:
return
LOGGER.debug("New system notifications: %s", to_add)
for notification in to_add:
text = notification.text
if notification.link:
text = f"{text} For more information: {notification.link}"
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_NOTIFICATION,
event_data={
ATTR_CATEGORY: notification.category,
ATTR_CODE: notification.code,
ATTR_MESSAGE: text,
ATTR_TIMESTAMP: notification.timestamp,
},
)
self._system_notifications[system.system_id] = latest_notifications
async def _async_start_websocket_loop(self) -> None:
"""Start a websocket reconnection loop."""
assert self._api.websocket
try:
await self._api.websocket.async_connect()
await self._api.websocket.async_listen()
except asyncio.CancelledError:
LOGGER.debug("Request to cancel websocket loop received")
raise
except WebsocketError as err:
LOGGER.error("Failed to connect to websocket: %s", err)
except Exception as err: # pylint: disable=broad-except
LOGGER.error("Unknown exception while connecting to websocket: %s", err)
LOGGER.info("Reconnecting to websocket")
await self._async_cancel_websocket_loop()
self._websocket_reconnect_task = self._hass.async_create_task(
self._async_start_websocket_loop()
)
async def _async_cancel_websocket_loop(self) -> None:
"""Stop any existing websocket reconnection loop."""
if self._websocket_reconnect_task:
self._websocket_reconnect_task.cancel()
try:
await self._websocket_reconnect_task
except asyncio.CancelledError:
LOGGER.debug("Websocket reconnection task successfully canceled")
self._websocket_reconnect_task = None
assert self._api.websocket
await self._api.websocket.async_disconnect()
@callback
def _async_websocket_on_event(self, event: WebsocketEvent) -> None:
"""Define a callback for receiving a websocket event."""
LOGGER.debug("New websocket event: %s", event)
async_dispatcher_send(
self._hass, DISPATCHER_TOPIC_WEBSOCKET_EVENT.format(event.system_id), event
)
if event.event_type not in WEBSOCKET_EVENTS_TO_FIRE_HASS_EVENT:
return
sensor_type: str | None
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_EVENT,
event_data={
ATTR_LAST_EVENT_CHANGED_BY: event.changed_by,
ATTR_LAST_EVENT_TYPE: event.event_type,
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_SERIAL: event.sensor_serial,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_SYSTEM_ID: event.system_id,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
},
)
async def async_init(self) -> None:
"""Initialize the SimpliSafe "manager" class."""
assert self._api.refresh_token
assert self._api.websocket
self._api.websocket.add_event_callback(self._async_websocket_on_event)
self._websocket_reconnect_task = asyncio.create_task(
self._async_start_websocket_loop()
)
async def async_websocket_disconnect_listener(_: Event) -> None:
"""Define an event handler to disconnect from the websocket."""
assert self._api.websocket
await self._async_cancel_websocket_loop()
self.entry.async_on_unload(
self._hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_websocket_disconnect_listener
)
)
self.systems = await self._api.async_get_systems()
for system in self.systems.values():
self._system_notifications[system.system_id] = set()
_async_register_base_station(self._hass, self.entry, system)
# Future events will come from the websocket, but since subscription to the
# websocket doesn't provide the most recent event, we grab it from the REST
# API to ensure event-related attributes aren't empty on startup:
try:
self.initial_event_to_use[
system.system_id
] = await system.async_get_latest_event()
except SimplipyError as err:
LOGGER.error("Error while fetching initial event: %s", err)
self.initial_event_to_use[system.system_id] = {}
self.coordinator = DataUpdateCoordinator(
self._hass,
LOGGER,
name=self.entry.title,
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=self.async_update,
)
@callback
def async_save_refresh_token(token: str) -> None:
"""Save a refresh token to the config entry."""
LOGGER.info("Saving new refresh token to HASS storage")
self._hass.config_entries.async_update_entry(
self.entry,
data={**self.entry.data, CONF_TOKEN: token},
)
async def async_handle_refresh_token(token: str) -> None:
"""Handle a new refresh token."""
async_save_refresh_token(token)
# Open a new websocket connection with the fresh token:
assert self._api.websocket
await self._async_cancel_websocket_loop()
self._websocket_reconnect_task = self._hass.async_create_task(
self._async_start_websocket_loop()
)
self.entry.async_on_unload(
self._api.add_refresh_token_callback(async_handle_refresh_token)
)
# Save the refresh token we got on entry setup:
async_save_refresh_token(self._api.refresh_token)
async def async_update(self) -> None:
"""Get updated data from SimpliSafe."""
async def async_update_system(system: SystemType) -> None:
"""Update a system."""
await system.async_update(cached=system.version != 3)
self._async_process_new_notifications(system)
tasks = [async_update_system(system) for system in self.systems.values()]
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, InvalidCredentialsError):
raise ConfigEntryAuthFailed("Invalid credentials") from result
if isinstance(result, EndpointUnavailableError):
                # In case the user attempts an action not allowed by their current plan,
                # we merely log that message at INFO level (so the user is aware but not
                # spammed with ERROR messages about something they cannot change):
LOGGER.info(result)
if isinstance(result, SimplipyError):
raise UpdateFailed(f"SimpliSafe error while updating: {result}")
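# Illustrative sketch (not part of the upstream integration): async_update above relies
# on asyncio.gather(return_exceptions=True) handing failures back as result objects,
# which are then inspected with isinstance instead of being raised immediately.
async def _example_gather_with_return_exceptions() -> None:
    async def succeed() -> str:
        return "updated"

    async def fail() -> str:
        raise ValueError("simulated update failure")

    results = await asyncio.gather(succeed(), fail(), return_exceptions=True)
    for result in results:
        if isinstance(result, Exception):
            # The real code maps these to ConfigEntryAuthFailed / UpdateFailed.
            LOGGER.debug("Update task failed: %s", result)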
class SimpliSafeEntity(CoordinatorEntity):
"""Define a base SimpliSafe entity."""
_attr_has_entity_name = True
def __init__(
self,
simplisafe: SimpliSafe,
system: SystemType,
*,
device: Device | None = None,
additional_websocket_events: Iterable[str] | None = None,
) -> None:
"""Initialize."""
assert simplisafe.coordinator
super().__init__(simplisafe.coordinator)
        # SimpliSafe can incorrectly return an error state when there isn't any
        # error, which can lead to entities frequently showing an unknown state.
        # To protect against that, we keep an error count for each entity and only
        # mark it unavailable after several errors in a row:
self._error_count = 0
if device:
model = device.type.name.capitalize().replace("_", " ")
device_name = f"{device.name.capitalize()} {model}"
serial = device.serial
else:
model = device_name = DEFAULT_ENTITY_MODEL
serial = system.serial
event = simplisafe.initial_event_to_use[system.system_id]
if raw_type := event.get("sensorType"):
try:
device_type = DeviceTypes(raw_type)
except ValueError:
device_type = DeviceTypes.UNKNOWN
else:
device_type = DeviceTypes.UNKNOWN
self._attr_extra_state_attributes = {
ATTR_LAST_EVENT_INFO: event.get("info"),
ATTR_LAST_EVENT_SENSOR_NAME: event.get("sensorName"),
ATTR_LAST_EVENT_SENSOR_TYPE: device_type.name.lower(),
ATTR_LAST_EVENT_TIMESTAMP: event.get("eventTimestamp"),
ATTR_SYSTEM_ID: system.system_id,
}
self._attr_device_info = DeviceInfo(
configuration_url=DEFAULT_CONFIG_URL,
identifiers={(DOMAIN, serial)},
manufacturer="SimpliSafe",
model=model,
name=device_name,
via_device=(DOMAIN, system.system_id),
)
self._attr_unique_id = serial
self._device = device
self._online = True
self._simplisafe = simplisafe
self._system = system
self._websocket_events_to_listen_for = [
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_POWER_OUTAGE,
EVENT_POWER_RESTORED,
]
if additional_websocket_events:
self._websocket_events_to_listen_for += additional_websocket_events
@property
def available(self) -> bool:
"""Return whether the entity is available."""
# We can easily detect if the V3 system is offline, but no simple check exists
# for the V2 system. Therefore, assuming the coordinator hasn't failed, we mark
# the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
# 2. We can verify that the entity is online
if isinstance(self._system, SystemV3):
system_offline = self._system.offline
else:
system_offline = False
return (
self._error_count < DEFAULT_ERROR_THRESHOLD
and self._online
and not system_offline
)
@callback
def _handle_coordinator_update(self) -> None:
"""Update the entity with new REST API data."""
if self.coordinator.last_update_success:
self.async_reset_error_count()
else:
self.async_increment_error_count()
self.async_update_from_rest_api()
self.async_write_ha_state()
@callback
def _handle_websocket_update(self, event: WebsocketEvent) -> None:
"""Update the entity with new websocket data."""
# Ignore this event if it belongs to a system other than this one:
if event.system_id != self._system.system_id:
return
# Ignore this event if this entity hasn't expressed interest in its type:
if event.event_type not in self._websocket_events_to_listen_for:
return
        # Ignore this event if it belongs to an entity with a different serial
        # number from this one's:
if (
self._device
and event.event_type in WEBSOCKET_EVENTS_REQUIRING_SERIAL
and event.sensor_serial != self._device.serial
):
return
sensor_type: str | None
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._attr_extra_state_attributes.update(
{
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
}
)
# It's unknown whether these events reach the base station (since the connection
# is lost); we include this for completeness and coverage:
if event.event_type in (EVENT_CONNECTION_LOST, EVENT_POWER_OUTAGE):
self._online = False
return
        # If the base station comes back online, set entities to available, but don't
        # instruct the entities to update their state (since there won't be anything new
        # until the next websocket event or REST API update):
if event.event_type in (EVENT_CONNECTION_RESTORED, EVENT_POWER_RESTORED):
self._online = True
return
self.async_update_from_websocket_event(event)
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
DISPATCHER_TOPIC_WEBSOCKET_EVENT.format(self._system.system_id),
self._handle_websocket_update,
)
)
self.async_update_from_rest_api()
@callback
def async_increment_error_count(self) -> None:
"""Increment this entity's error count."""
LOGGER.debug('Error for entity "%s" (total: %s)', self.name, self._error_count)
self._error_count += 1
@callback
def async_reset_error_count(self) -> None:
"""Reset this entity's error count."""
if self._error_count == 0:
return
LOGGER.debug('Resetting error count for "%s"', self.name)
self._error_count = 0
@callback
def async_update_from_rest_api(self) -> None:
"""Update the entity when new data comes from the REST API."""
@callback
def async_update_from_websocket_event(self, event: WebsocketEvent) -> None:
"""Update the entity when new data comes from the websocket."""
| {
"content_hash": "a22c171ec1c9626e767f13f019021afb",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 88,
"avg_line_length": 34.347144456886895,
"alnum_prop": 0.6258150756390193,
"repo_name": "nkgilley/home-assistant",
"id": "53aa9e84054214b6ec483f0dda33187a7fbdb343",
"size": "30672",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/simplisafe/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_enable_monitoring_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
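# Illustrative only (not part of the generated client): what the builder above produces
# for hypothetical identifiers. Handy when inspecting the raw request before it goes
# through the pipeline.
def _example_enable_monitoring_request() -> HttpRequest:
    request = build_enable_monitoring_request(
        resource_group_name="example-rg",  # hypothetical
        cluster_name="example-cluster",  # hypothetical
        subscription_id="00000000-0000-0000-0000-000000000000",  # hypothetical
    )
    # A PUT against .../clusters/example-cluster/extensions/clustermonitoring, with
    # api-version=2021-06-01 added as a query parameter and a JSON Accept header.
    assert request.method == "PUT"
    assert "extensions/clustermonitoring" in request.url
    return request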
def build_get_monitoring_status_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_disable_monitoring_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_enable_azure_monitor_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_azure_monitor_status_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_disable_azure_monitor_request(
resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_request(
resource_group_name: str, cluster_name: str, extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"extensionName": _SERIALIZER.url("extension_name", extension_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, cluster_name: str, extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"extensionName": _SERIALIZER.url("extension_name", extension_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, cluster_name: str, extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"extensionName": _SERIALIZER.url("extension_name", extension_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_azure_async_operation_status_request(
resource_group_name: str,
cluster_name: str,
extension_name: str,
operation_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}/azureAsyncOperations/{operationId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"clusterName": _SERIALIZER.url("cluster_name", cluster_name, "str"),
"extensionName": _SERIALIZER.url("extension_name", extension_name, "str"),
"operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ExtensionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.hdinsight.HDInsightManagementClient`'s
:attr:`extensions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _enable_monitoring_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cluster_name: str,
parameters: Union[_models.ClusterMonitoringRequest, IO],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ClusterMonitoringRequest")
request = build_enable_monitoring_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._enable_monitoring_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_enable_monitoring_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring"
}
@overload
def begin_enable_monitoring(
self,
resource_group_name: str,
cluster_name: str,
parameters: _models.ClusterMonitoringRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Operations Management Suite (OMS) on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param parameters: The Operations Management Suite (OMS) workspace parameters. Required.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterMonitoringRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_enable_monitoring(
self,
resource_group_name: str,
cluster_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Operations Management Suite (OMS) on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param parameters: The Operations Management Suite (OMS) workspace parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_enable_monitoring(
self,
resource_group_name: str,
cluster_name: str,
parameters: Union[_models.ClusterMonitoringRequest, IO],
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Operations Management Suite (OMS) on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
        :param parameters: The Operations Management Suite (OMS) workspace parameters. Is either a
         model type or an IO type. Required.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterMonitoringRequest or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._enable_monitoring_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_enable_monitoring.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring"
}
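    # Illustrative usage only (not part of the generated client). Callers reach this
    # operation via the management client's ``extensions`` attribute; the credential,
    # subscription ID, and workspace values below are hypothetical placeholders, and
    # the ClusterMonitoringRequest field names are assumed from the models package:
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.hdinsight import HDInsightManagementClient
    #     from azure.mgmt.hdinsight.models import ClusterMonitoringRequest
    #
    #     client = HDInsightManagementClient(
    #         credential=DefaultAzureCredential(),
    #         subscription_id="00000000-0000-0000-0000-000000000000",
    #     )
    #     poller = client.extensions.begin_enable_monitoring(
    #         resource_group_name="example-rg",
    #         cluster_name="example-cluster",
    #         parameters=ClusterMonitoringRequest(
    #             workspace_id="example-workspace-id",
    #             primary_key="example-primary-key",
    #         ),
    #     )
    #     poller.result()  # block until the long-running operation completes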
@distributed_trace
def get_monitoring_status(
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> _models.ClusterMonitoringResponse:
"""Gets the status of Operations Management Suite (OMS) on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClusterMonitoringResponse or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.ClusterMonitoringResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ClusterMonitoringResponse] = kwargs.pop("cls", None)
request = build_get_monitoring_status_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_monitoring_status.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ClusterMonitoringResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_monitoring_status.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring"
}
def _disable_monitoring_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_disable_monitoring_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._disable_monitoring_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disable_monitoring_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring"
}
@distributed_trace
def begin_disable_monitoring(self, resource_group_name: str, cluster_name: str, **kwargs: Any) -> LROPoller[None]:
"""Disables the Operations Management Suite (OMS) on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._disable_monitoring_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_disable_monitoring.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/clustermonitoring"
}
def _enable_azure_monitor_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cluster_name: str,
parameters: Union[_models.AzureMonitorRequest, IO],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AzureMonitorRequest")
request = build_enable_azure_monitor_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._enable_azure_monitor_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_enable_azure_monitor_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor"
}
@overload
def begin_enable_azure_monitor(
self,
resource_group_name: str,
cluster_name: str,
parameters: _models.AzureMonitorRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Azure Monitor on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param parameters: The Log Analytics workspace parameters. Required.
:type parameters: ~azure.mgmt.hdinsight.models.AzureMonitorRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_enable_azure_monitor(
self,
resource_group_name: str,
cluster_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Azure Monitor on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param parameters: The Log Analytics workspace parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_enable_azure_monitor(
self,
resource_group_name: str,
cluster_name: str,
parameters: Union[_models.AzureMonitorRequest, IO],
**kwargs: Any
) -> LROPoller[None]:
"""Enables the Azure Monitor on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
        :param parameters: The Log Analytics workspace parameters. Is either a model type or an IO type.
Required.
:type parameters: ~azure.mgmt.hdinsight.models.AzureMonitorRequest or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._enable_azure_monitor_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_enable_azure_monitor.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor"
}
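    # Hedged usage sketch for the operation above. The client construction, credential and
    # resource names are placeholders for illustration only; the AzureMonitorRequest
    # attribute names are assumptions about the models package, not defined in this file.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.hdinsight import HDInsightManagementClient
    #   from azure.mgmt.hdinsight import models as hdinsight_models
    #
    #   client = HDInsightManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = client.extensions.begin_enable_azure_monitor(
    #       resource_group_name="my-rg",
    #       cluster_name="my-cluster",
    #       parameters=hdinsight_models.AzureMonitorRequest(
    #           workspace_id="<workspace-id>", primary_key="<workspace-key>"
    #       ),
    #   )
    #   poller.result()  # blocks until the long-running operation finishes; returns None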
@distributed_trace
def get_azure_monitor_status(
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> _models.AzureMonitorResponse:
"""Gets the status of Azure Monitor on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureMonitorResponse or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.AzureMonitorResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.AzureMonitorResponse] = kwargs.pop("cls", None)
request = build_get_azure_monitor_status_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_azure_monitor_status.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("AzureMonitorResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_azure_monitor_status.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor"
}
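    # Hedged usage sketch: reading the monitor status with the GET operation above,
    # reusing the `client` from the earlier sketch. The attributes read from the response
    # are assumptions about the AzureMonitorResponse model, not defined in this file.
    #
    #   status = client.extensions.get_azure_monitor_status("my-rg", "my-cluster")
    #   print(status.cluster_monitoring_enabled, status.workspace_id)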
def _disable_azure_monitor_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_disable_azure_monitor_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._disable_azure_monitor_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disable_azure_monitor_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor"
}
@distributed_trace
def begin_disable_azure_monitor(
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Disables the Azure Monitor on the HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._disable_azure_monitor_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_disable_azure_monitor.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/azureMonitor"
}
def _create_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
parameters: Union[_models.Extension, IO],
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Extension")
request = build_create_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_create_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}"
}
@overload
def begin_create(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
parameters: _models.Extension,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Creates an HDInsight cluster extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
:param parameters: The cluster extensions create request. Required.
:type parameters: ~azure.mgmt.hdinsight.models.Extension
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[None]:
"""Creates an HDInsight cluster extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
:param parameters: The cluster extensions create request. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create(
self,
resource_group_name: str,
cluster_name: str,
extension_name: str,
parameters: Union[_models.Extension, IO],
**kwargs: Any
) -> LROPoller[None]:
"""Creates an HDInsight cluster extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
        :param parameters: The cluster extensions create request. Is either a model type or an IO
         type. Required.
:type parameters: ~azure.mgmt.hdinsight.models.Extension or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}"
}
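    # Hedged usage sketch for begin_create: the body may be an Extension model or a raw
    # bytes/IO payload, mirroring the Union[_models.Extension, IO] signature above. The
    # model attribute names, the raw JSON keys and the "clustermonitoring" extension name
    # are assumptions; `client` and `hdinsight_models` reuse the earlier sketch.
    #
    #   ext = hdinsight_models.Extension(workspace_id="<workspace-id>",
    #                                    primary_key="<workspace-key>")
    #   client.extensions.begin_create("my-rg", "my-cluster", "clustermonitoring", ext).result()
    #
    #   # or, passing the serialized body directly:
    #   payload = b'{"workspaceId": "<workspace-id>", "primaryKey": "<workspace-key>"}'
    #   client.extensions.begin_create(
    #       "my-rg", "my-cluster", "clustermonitoring", payload,
    #       content_type="application/json",
    #   ).result()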
@distributed_trace
def get(
self, resource_group_name: str, cluster_name: str, extension_name: str, **kwargs: Any
) -> _models.ClusterMonitoringResponse:
"""Gets the extension properties for the specified HDInsight cluster extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClusterMonitoringResponse or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.ClusterMonitoringResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ClusterMonitoringResponse] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ClusterMonitoringResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cluster_name: str, extension_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}"
}
@distributed_trace
def begin_delete(
self, resource_group_name: str, cluster_name: str, extension_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes the specified extension for HDInsight cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}"
}
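    # Hedged sketch of resuming the delete poller from a saved continuation token, as the
    # docstring above describes. Resource and extension names are placeholders; `client`
    # reuses the earlier sketch.
    #
    #   poller = client.extensions.begin_delete("my-rg", "my-cluster", "clustermonitoring")
    #   token = poller.continuation_token()   # persist somewhere durable
    #   ...
    #   resumed = client.extensions.begin_delete(
    #       "my-rg", "my-cluster", "clustermonitoring",
    #       continuation_token=token,
    #   )
    #   resumed.result()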
@distributed_trace
def get_azure_async_operation_status(
self, resource_group_name: str, cluster_name: str, extension_name: str, operation_id: str, **kwargs: Any
) -> _models.AsyncOperationResult:
"""Gets the async operation status.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cluster_name: The name of the cluster. Required.
:type cluster_name: str
:param extension_name: The name of the cluster extension. Required.
:type extension_name: str
:param operation_id: The long running operation id. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AsyncOperationResult or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.AsyncOperationResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.AsyncOperationResult] = kwargs.pop("cls", None)
request = build_get_azure_async_operation_status_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
extension_name=extension_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_azure_async_operation_status.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("AsyncOperationResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_azure_async_operation_status.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/extensions/{extensionName}/azureAsyncOperations/{operationId}"
}
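    # Hedged usage sketch: the `cls` keyword documented above is called with the raw
    # pipeline response, the deserialized model and a headers dict, so it can be used to
    # return extra data alongside the result. The lambda arity mirrors the call sites in
    # this file; names and values are illustrative.
    #
    #   result, headers = client.extensions.get_azure_async_operation_status(
    #       "my-rg", "my-cluster", "clustermonitoring", "<operation-id>",
    #       cls=lambda pipeline_response, deserialized, headers: (deserialized, headers),
    #   )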
| {
"content_hash": "3508dd69a00920fd1be3e4fce4dd680c",
"timestamp": "",
"source": "github",
"line_count": 1664,
"max_line_length": 198,
"avg_line_length": 46.00420673076923,
"alnum_prop": 0.6472808977021854,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a7d2ada008297290be94288cf5bc0a65e6935d34",
"size": "77051",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/operations/_extensions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
raise
if HAS_POSTGRES:
class FakeConnection:
def __init__(self):
self.settings_dict = {
'NAME': 'test',
}
class FakePostGISOperations(PostGISOperations):
def __init__(self, version=None):
self.version = version
self.connection = FakeConnection()
def _get_postgis_func(self, func):
if func == 'postgis_lib_version':
if self.version is None:
raise ProgrammingError
else:
return self.version
elif func == 'version':
pass
else:
raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostGISVersionCheck(unittest.TestCase):
"""
    The PostGIS version check correctly parses the version numbers
"""
def test_get_version(self):
expect = '1.0.0'
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ('1.2.3', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ('1.2.3dev', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_loose_tuple(self):
expect = ('1.2.3b1.dev0', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
('1.3.0', 1, 3, 0),
('2.1.1', 2, 1, 1),
('2.2.0dev', 2, 2, 0),
]
for version in versions:
with self.subTest(version=version):
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_no_version_number(self):
ops = FakePostGISOperations()
with self.assertRaises(ImproperlyConfigured):
ops.spatial_version
def test_version_dependent_funcs(self):
"""
Resolve names of functions renamed and deprecated in PostGIS 2.2.0
depending on PostGIS version.
Remove when dropping support for PostGIS 2.1.
"""
ops = FakePostGISOperations('2.2.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_DistanceSphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_DistanceSpheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_LengthSpheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_MemSize')
ops = FakePostGISOperations('2.1.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_distance_sphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_distance_spheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_length_spheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_mem_size')
| {
"content_hash": "75716c39fcc355e27a6f79223bbd5a59",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 95,
"avg_line_length": 37.270270270270274,
"alnum_prop": 0.6323422770123278,
"repo_name": "tysonclugg/django",
"id": "2f55406f56ae65fea9f0edead8f06b0e1875342d",
"size": "4137",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/gis_tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52492"
},
{
"name": "HTML",
"bytes": "173554"
},
{
"name": "JavaScript",
"bytes": "451010"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11934829"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
import sys, codecs
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
import site
site.addsitedir(r"..\Lib")  # raw string so the backslash is never treated as an escape
from ChineseUtilities import ChineseDB, SortStringDB
import datafiles, os
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# Checking for Sort File covering all of XD dictionary
OldSortDB = SortStringDB(os.path.join(datafiles.datapath, r"Archive\ch2sort_2004(utf8).txt"))
SortDB = SortStringDB()
print("Checking %s against sort file %s" % (OldSortDB.FileName, SortDB.FileName))
notOk = missingComposed = 0
for e in sorted(OldSortDB.items()):
## print(e)
try:
sort = SortDB[e[0]]
except KeyError:
print("Not in latest DB:", repr(e[0]))
continue
if sort != e[1]:
if len(sort) < len(e[1]):
notOk += 1
print(e[0], list(sort.keys()), "!=", list(e[1].keys()))
for i in ['\u602b', '\u602b\u7136']: print(i)
##print("Dictionary entries =", len(HZdict))
##print("Sort key entries =", len(SortDB))
##print("\tMissing composed characters (ignored) =", missingComposed)
##print("\tKnown length mismatches (ignored) =", len(IgnoreErrors))
##print()
print("\tUnknown errors =", notOk)
##
##
| {
"content_hash": "f59c9d5d318e37191c38bf04e7f7df0f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 93,
"avg_line_length": 27.875,
"alnum_prop": 0.5889387144992526,
"repo_name": "rmlockwood/FLExTrans",
"id": "23f290ed0224f41f132aee67b329cd5921b0710e",
"size": "1559",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "FlexTools2.1/FlexTools/Modules/Chinese/Utilities/check_oldsort_vs_pkl sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12781"
},
{
"name": "CSS",
"bytes": "45823"
},
{
"name": "HTML",
"bytes": "827329"
},
{
"name": "Makefile",
"bytes": "7353"
},
{
"name": "NSIS",
"bytes": "12387"
},
{
"name": "Python",
"bytes": "4442196"
},
{
"name": "VBScript",
"bytes": "2068"
}
],
"symlink_target": ""
} |
"""
Plugin responsible for post-installation configuration
"""
from packstack.installer import utils
from packstack.installer import basedefs
# ------------- Postscript Packstack Plugin Initialization --------------
PLUGIN_NAME = "Postscript"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
group = {"GROUP_NAME": "POSTSCRIPT",
"DESCRIPTION": "POSTSCRIPT Config parameters",
"PRE_CONDITION": lambda x: 'yes',
"PRE_CONDITION_MATCH": "yes",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, [])
def initSequences(controller):
config = controller.CONF
postscript_steps = []
if (config['CONFIG_PROVISION_TEMPEST'] == "y" and
config['CONFIG_RUN_TEMPEST'] == "y"):
postscript_steps.append(
{'title': 'Running Tempest',
'functions': [run_tempest]}
)
controller.addSequence("Running post install scripts", [], [],
postscript_steps)
# -------------------------- step functions --------------------------
def run_tempest(config, messages):
logfile = basedefs.DIR_LOG + "/tempest.log"
print("Running Tempest on %s" % config['CONFIG_TEMPEST_HOST'])
server = utils.ScriptRunner(config['CONFIG_TEMPEST_HOST'])
server.append('pushd /var/lib/tempest')
server.append('tempest run --regex \'(%s)\' --black-regex \'%s\' --concurrency 2 > %s'
% (config['CONFIG_RUN_TEMPEST_TESTS'].replace(' ', '|'),
config['CONFIG_SKIP_TEMPEST_TESTS'].replace(' ', '|'),
logfile))
server.append('popd')
server.execute()
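# Hedged sketch of the command assembled above for sample config values (the values and
# the log directory are illustrative, not project defaults):
#
#   CONFIG_RUN_TEMPEST_TESTS  = "smoke dashboard"
#   CONFIG_SKIP_TEMPEST_TESTS = "slow"
#   ->
#   tempest run --regex '(smoke|dashboard)' --black-regex 'slow' --concurrency 2 > <DIR_LOG>/tempest.log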
| {
"content_hash": "196c1f57012c2f63926c8e5fd1dab228",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 91,
"avg_line_length": 34.5,
"alnum_prop": 0.5785507246376812,
"repo_name": "mahak/packstack",
"id": "a6e02f21ee7329d6598d8efd2f9d4091bcc45b74",
"size": "2295",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "packstack/plugins/postscript_951.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "170058"
},
{
"name": "Python",
"bytes": "511156"
},
{
"name": "Ruby",
"bytes": "11197"
},
{
"name": "Shell",
"bytes": "27842"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group
from django.utils.safestring import mark_safe
from authority import permissions, get_choices_for
from authority.models import Permission
from authority.compat import get_user_model
User = get_user_model()
class BasePermissionForm(forms.ModelForm):
codename = forms.CharField(label=_('Permission'))
class Meta:
model = Permission
exclude = []
def __init__(self, perm=None, obj=None, approved=False, *args, **kwargs):
self.perm = perm
self.obj = obj
self.approved = approved
if obj and perm:
self.base_fields['codename'].widget = forms.HiddenInput()
elif obj and (not perm or not approved):
perms = get_choices_for(self.obj)
self.base_fields['codename'].widget = forms.Select(choices=perms)
super(BasePermissionForm, self).__init__(*args, **kwargs)
def save(self, request, commit=True, *args, **kwargs):
self.instance.creator = request.user
self.instance.content_type = ContentType.objects.get_for_model(self.obj)
self.instance.object_id = self.obj.id
self.instance.codename = self.perm
self.instance.approved = self.approved
return super(BasePermissionForm, self).save(commit)
class UserPermissionForm(BasePermissionForm):
user = forms.CharField(label=_('User'))
class Meta(BasePermissionForm.Meta):
fields = ('user',)
def __init__(self, *args, **kwargs):
if not kwargs.get('approved', False):
self.base_fields['user'].widget = forms.HiddenInput()
super(UserPermissionForm, self).__init__(*args, **kwargs)
def clean_user(self):
username = self.cleaned_data["user"]
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
raise forms.ValidationError(
mark_safe(_("A user with that username does not exist.")))
check = permissions.BasePermission(user=user)
error_msg = None
if user.is_superuser:
error_msg = _("The user %(user)s do not need to request "
"access to any permission as it is a super user.")
elif check.has_perm(self.perm, self.obj):
error_msg = _("The user %(user)s already has the permission "
"'%(perm)s' for %(object_name)s '%(obj)s'")
elif check.requested_perm(self.perm, self.obj):
error_msg = _("The user %(user)s already requested the permission"
" '%(perm)s' for %(object_name)s '%(obj)s'")
if error_msg:
error_msg = error_msg % {
'object_name': self.obj._meta.object_name.lower(),
'perm': self.perm,
'obj': self.obj,
'user': user,
}
raise forms.ValidationError(mark_safe(error_msg))
return user
class GroupPermissionForm(BasePermissionForm):
group = forms.CharField(label=_('Group'))
class Meta(BasePermissionForm.Meta):
fields = ('group',)
def clean_group(self):
groupname = self.cleaned_data["group"]
try:
group = Group.objects.get(name__iexact=groupname)
except Group.DoesNotExist:
raise forms.ValidationError(
mark_safe(_("A group with that name does not exist.")))
check = permissions.BasePermission(group=group)
if check.has_perm(self.perm, self.obj):
raise forms.ValidationError(mark_safe(
_("This group already has the permission '%(perm)s' "
"for %(object_name)s '%(obj)s'") % {
'perm': self.perm,
'object_name': self.obj._meta.object_name.lower(),
'obj': self.obj,
}))
return group
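# Hedged usage sketch of the forms above, e.g. from a view that handles permission
# requests. The codename, object and request are placeholders; the constructor and
# save() signatures come from BasePermissionForm above.
#
#   form = UserPermissionForm(perm="foo_permission.review_foo", obj=some_obj,
#                             approved=False, data=request.POST)
#   if form.is_valid():
#       form.save(request)   # sets creator, content_type, object_id, codename, approved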
| {
"content_hash": "24c20923e9fbd6481b49a58a3a3b5f9e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 38.65384615384615,
"alnum_prop": 0.5972636815920398,
"repo_name": "mythmon/kitsune",
"id": "03a93dd19e1ba04c93eaf06c605302439d20edd9",
"size": "4020",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "authority/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "281386"
},
{
"name": "HTML",
"bytes": "624493"
},
{
"name": "JavaScript",
"bytes": "750034"
},
{
"name": "Python",
"bytes": "2721930"
},
{
"name": "Shell",
"bytes": "10281"
},
{
"name": "Smarty",
"bytes": "2062"
}
],
"symlink_target": ""
} |
"""Browser steps for Gherkin"""
import os
from contextlib import contextmanager
from aloe import around, world
from selenium import webdriver
from xvfbwrapper import Xvfb
@around.all
@contextmanager
def with_browser():
"""Start a browser for the tests."""
if 'XVFB' in os.environ:
world.vdisplay = Xvfb(width=1200, height=800)
world.vdisplay.start()
world.browser = create_browser()
yield
world.browser.quit()
delattr(world, 'browser')
if hasattr(world, 'vdisplay'):
world.vdisplay.stop()
def browser_type():
"""Browser type selected for the tests."""
return os.environ.get('BROWSER_TYPE', 'firefox')
def custom_chrome():
"""Start Chrome with custom options."""
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
'credentials_enable_service': False,
'profile': {
'password_manager_enabled': False,
},
})
return webdriver.Chrome(chrome_options=options)
def create_browser():
"""Create a Selenium browser for tests."""
if 'SELENIUM_ADDRESS' in os.environ:
address = 'http://{}/wd/hub'.format(os.environ['SELENIUM_ADDRESS'])
capabilities = {
'chrome': webdriver.DesiredCapabilities.CHROME,
'firefox': webdriver.DesiredCapabilities.FIREFOX,
'edge': webdriver.DesiredCapabilities.EDGE,
'ie': webdriver.DesiredCapabilities.INTERNETEXPLORER,
'phantomjs': webdriver.DesiredCapabilities.PHANTOMJS,
}
try:
browser = capabilities[browser_type()]
except KeyError:
raise ValueError("Invalid BROWSER_TYPE.")
return webdriver.Remote(
address,
desired_capabilities=browser,
)
else:
browsers = {
'chrome': custom_chrome,
'firefox': webdriver.Firefox,
'phantomjs': webdriver.PhantomJS,
}
driver = browsers[browser_type()]
# Explicitly specify the browser locale for the date input tests to work
# regardless of the user's settings.
old_lc_all = os.environ.get('LC_ALL', '')
try:
os.environ['LC_ALL'] = 'en_US'
return driver()
finally:
os.environ['LC_ALL'] = old_lc_all
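# Hedged usage sketch: the browser is chosen via environment variables and exposed on
# aloe's `world` for step definitions. The shell invocation and the step below are
# illustrative assumptions, not part of this module.
#
#   $ BROWSER_TYPE=chrome XVFB=1 aloe
#
#   from aloe import step, world
#
#   @step(r'I open the page "([^"]*)"')
#   def open_page(self, url):
#       world.browser.get(url)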
| {
"content_hash": "ec27d174a9fa86c528c76cafa1f1d263",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 26.179775280898877,
"alnum_prop": 0.6094420600858369,
"repo_name": "jricardo27/holiday_planner",
"id": "8562f22ff084b9515ff6114fec6255f105df9da5",
"size": "2330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holiday_planner/features/steps/browser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1518"
},
{
"name": "Gherkin",
"bytes": "839"
},
{
"name": "HTML",
"bytes": "36302"
},
{
"name": "JavaScript",
"bytes": "19616"
},
{
"name": "Python",
"bytes": "98712"
},
{
"name": "Shell",
"bytes": "4362"
}
],
"symlink_target": ""
} |
"Python and Django compatibility functions."
from __future__ import unicode_literals
from django.conf import settings
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
try:
from django.contrib.auth import get_user_model
except ImportError: # pragma: no cover
# Django < 1.5
from django.contrib.auth.models import User
User.USERNAME_FIELD = 'username'
get_user_model = lambda: User
# urllib
try:
from urllib.parse import urlencode, parse_qs, urlparse
except ImportError: # pragma: no cover
# Python 2.X
from urllib import urlencode
from urlparse import parse_qs, urlparse
try:
from django.utils.encoding import force_text, smart_bytes, force_bytes
except ImportError: # pragma: no cover
from django.utils.encoding import force_unicode as force_text
from django.utils.encoding import smart_str as smart_bytes
try:
from django.utils.encoding import force_str as force_bytes
except ImportError:
# This didn't get back-ported to 1.4.X
force_bytes = smart_bytes
try: # pragma: no cover
from google.appengine.ext import db
APPENGINE = True
except ImportError:
APPENGINE = False
| {
"content_hash": "cb19a8055bf292d23a080fcf78c8efa7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.7189597315436241,
"repo_name": "vyscond/django-all-access",
"id": "615f5eb31d6da58f9e320e41843af1036bb589ad",
"size": "1192",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allaccess/compat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "121919"
}
],
"symlink_target": ""
} |
import abc
import collections
import collections.abc
import functools
import operator
import sys
import types as _types
import typing
__all__ = [
# Super-special typing primitives.
'Any',
'ClassVar',
'Concatenate',
'Final',
'LiteralString',
'ParamSpec',
'ParamSpecArgs',
'ParamSpecKwargs',
'Self',
'Type',
'TypeVar',
'TypeVarTuple',
'Unpack',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'NamedTuple',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'Annotated',
'assert_never',
'assert_type',
'clear_overloads',
'dataclass_transform',
'get_overloads',
'final',
'get_args',
'get_origin',
'get_type_hints',
'IntVar',
'is_typeddict',
'Literal',
'NewType',
'overload',
'override',
'Protocol',
'reveal_type',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
'Never',
'NoReturn',
'Required',
'NotRequired',
]
# for backward compatibility
PEP_560 = True
GenericMeta = type
# The functions below are modified copies of typing internal helpers.
# They are needed by _ProtocolMeta and they provide support for PEP 646.
_marker = object()
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
f" actual {alen}, expected {elen}")
if sys.version_info >= (3, 10):
def _should_collect_from_parameters(t):
return isinstance(
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
)
elif sys.version_info >= (3, 9):
def _should_collect_from_parameters(t):
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
def _should_collect_from_parameters(t):
return isinstance(t, typing._GenericAlias) and not t._special
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if sys.version_info >= (3, 11):
from typing import Any
else:
class _AnyMeta(type):
def __instancecheck__(self, obj):
if self is Any:
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
return super().__instancecheck__(obj)
def __repr__(self):
if self is Any:
return "typing_extensions.Any"
return super().__repr__()
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
if sys.version_info >= (3, 11):
final = typing.final
else:
# @final exists in 3.8+, but we backport it for all versions
# before 3.11 to keep support for the __final__ attribute.
# See https://bugs.python.org/issue46342
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
def IntVar(name):
return typing.TypeVar(name)
# 3.8+:
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
else:
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return typing._GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
_overload_dummy = typing._overload_dummy # noqa
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
The overloads for a function can be retrieved at runtime using the
get_overloads() function.
"""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
try:
_overload_registry[f.__module__][f.__qualname__][
f.__code__.co_firstlineno
] = func
except AttributeError:
# Not a normal function; ignore.
pass
return _overload_dummy
def get_overloads(func):
"""Return all defined overloads for *func* as a sequence."""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
if f.__module__ not in _overload_registry:
return []
mod_dict = _overload_registry[f.__module__]
if f.__qualname__ not in mod_dict:
return []
return list(mod_dict[f.__qualname__].values())
def clear_overloads():
"""Clear all overloads in the registry."""
_overload_registry.clear()
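    # Hedged sketch of how the registry above behaves (the `utf8` overloads mirror the
    # docstring example and are not part of this module):
    #
    #   @overload
    #   def utf8(value: None) -> None: ...
    #   @overload
    #   def utf8(value: bytes) -> bytes: ...
    #   def utf8(value):
    #       return value.encode() if isinstance(value, str) else value
    #
    #   assert len(get_overloads(utf8)) == 2   # the two stubs registered above
    #   clear_overloads()
    #   assert get_overloads(utf8) == []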
# This is not a real generic class. Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
ContextManager = typing.ContextManager
AsyncContextManager = typing.AsyncContextManager
DefaultDict = typing.DefaultDict
# 3.7.2+
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
else:
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
Counter = typing.Counter
ChainMap = typing.ChainMap
AsyncGenerator = typing.AsyncGenerator
NewType = typing.NewType
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
_PROTO_WHITELIST = ['Callable', 'Awaitable',
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _maybe_adjust_parameters(cls):
"""Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
The contents of this function are very similar
to logic found in typing.Generic.__init_subclass__
on the CPython main branch.
"""
tvars = []
if '__orig_bases__' in cls.__dict__:
tvars = typing._collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, typing._GenericAlias) and
base.__origin__ in (typing.Generic, Protocol)):
# for error messages
the_base = base.__origin__.__name__
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {the_base}[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
# 3.8+
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(abc.ABCMeta): # noqa: B024
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing_extensions.runtime act as simple-minded runtime protocol that checks
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not typing.Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params) # noqa
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, typing.TypeVar) for p in params):
i = 0
while isinstance(params[i], typing.TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
f" Parameter {i + 1} is {params[i]}")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params, len(cls.__parameters__))
return typing._GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
if '__orig_bases__' in cls.__dict__:
error = typing.Generic in cls.__orig_bases__
else:
error = typing.Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
_maybe_adjust_parameters(cls)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
# 3.8+
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.7
else:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
f' got {cls!r}')
cls._is_runtime_protocol = True
return cls
# Exists for backwards compatibility.
runtime = runtime_checkable
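# Illustrative usage sketch (assumes Protocol/runtime_checkable from this
# module; class names are hypothetical): a minimal runtime-checkable protocol.
#
#     @runtime_checkable
#     class SupportsClose(Protocol):
#         def close(self) -> None: ...
#
#     class Resource:
#         def close(self) -> None:
#             pass
#
#     assert isinstance(Resource(), SupportsClose)      # structural match
#     assert not isinstance(object(), SupportsClose)    # missing close()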
# 3.8+
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.7
else:
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:] # allow the "cls" keyword be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:] # allow the "cls" keyword be passed
if args:
typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
fields, = args # allow the "_fields" keyword be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
f'positional arguments but {len(args) + 2} '
'were given')
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
super().__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
# Don't insert typing.Generic into __bases__ here,
# or Generic.__init_subclass__ will raise TypeError
# in the super().__new__() call.
# Instead, monkey-patch __bases__ onto the class after it's been created.
tp_dict = super().__new__(cls, name, (dict,), ns)
if any(issubclass(base, typing.Generic) for base in bases):
tp_dict.__bases__ = (typing.Generic, dict)
_maybe_adjust_parameters(tp_dict)
annotations = {}
own_annotations = ns.get('__annotations__', {})
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
for annotation_key, annotation_type in own_annotations.items():
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
annotation_origin = get_origin(annotation_type)
if annotation_origin is Required:
required_keys.add(annotation_key)
elif annotation_origin is NotRequired:
optional_keys.add(annotation_key)
elif total:
required_keys.add(annotation_key)
else:
optional_keys.add(annotation_key)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, tuple(_TYPEDDICT_TYPES))
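# Illustrative usage sketch (assumes TypedDict, Required and is_typeddict are
# imported from typing_extensions; the Movie class is hypothetical). The
# backport records key optionality at runtime:
#
#     class Movie(TypedDict, total=False):
#         title: Required[str]
#         year: int
#
#     assert Movie.__required_keys__ == frozenset({'title'})
#     assert Movie.__optional_keys__ == frozenset({'year'})
#     assert is_typeddict(Movie)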
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
def assert_type(__val, __typ):
"""Assert (to the type checker) that the value is of the given type.
When the type checker encounters a call to assert_type(), it
emits an error if the value is not of the specified type::
def greet(name: str) -> None:
assert_type(name, str) # ok
assert_type(name, int) # type checker error
At runtime this returns the first argument unchanged and otherwise
does nothing.
"""
return __val
if hasattr(typing, "Required"):
get_type_hints = typing.get_type_hints
else:
import functools
import types
# replaces _strip_annotations()
def _strip_extras(t):
"""Strips Annotated, Required and NotRequired from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return types.GenericAlias(t.__origin__, stripped_args)
if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
(unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if hasattr(typing, "Annotated"):
hint = typing.get_type_hints(
obj, globalns=globalns, localns=localns, include_extras=True
)
else:
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_extras(t) for k, t in hint.items()}
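# Illustrative usage sketch (assumes TypedDict/NotRequired from
# typing_extensions; the Movie class is hypothetical): this get_type_hints()
# strips Required/NotRequired/Annotated wrappers unless include_extras=True.
#
#     class Movie(TypedDict):
#         year: NotRequired[int]
#
#     get_type_hints(Movie)                       # {'year': <class 'int'>}
#     get_type_hints(Movie, include_extras=True)  # {'year': NotRequired[int]}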
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type (and will be in
the __origin__ field), the remaining arguments are kept as a tuple in
the __extra__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
allowed_special_forms = (ClassVar, Final)
if get_origin(params[0]) in allowed_special_forms:
origin = params[0]
else:
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
f"Cannot subclass {cls.__module__}.Annotated"
)
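# Illustrative usage sketch (assumes the Annotated-aware get_origin/get_args
# defined below; the "unsigned" marker is hypothetical): metadata rides along
# on the alias and can be recovered at runtime.
#
#     UnsignedInt = Annotated[int, "unsigned"]
#     assert get_origin(UnsignedInt) is Annotated
#     assert get_args(UnsignedInt) == (int, "unsigned")
#     assert UnsignedInt(5) == 5   # instantiation falls through to int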
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeAliasForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.7-3.8
else:
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
class _DefaultMixin:
"""Mixin for TypeVarLike defaults."""
__slots__ = ()
def __init__(self, default):
if isinstance(default, (tuple, list)):
self.__default__ = tuple((typing._type_check(d, "Default must be a type")
for d in default))
elif default:
self.__default__ = typing._type_check(default, "Default must be a type")
else:
self.__default__ = None
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
"""Type variable."""
__module__ = 'typing'
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=None, infer_variance=False):
super().__init__(name, *constraints, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
self.__infer_variance__ = infer_variance
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
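# Illustrative usage sketch (assumes TypeVar from this module; the names T and
# U are hypothetical): the PEP 696 default is stored by _DefaultMixin and can
# be introspected.
#
#     T = TypeVar("T", default=int)
#     assert T.__default__ is int
#     U = TypeVar("U")
#     assert U.__default__ is None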
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
# 3.10+
if hasattr(typing, 'ParamSpec'):
# Add default Parameter - PEP 696
class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
"""Parameter specification variable."""
__module__ = 'typing'
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__(name, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# 3.7-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list, _DefaultMixin):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
# 3.7-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
__class__ = typing._GenericAlias
# Flag in 3.8.
_special = False
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
# 3.7-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
# 3.10+
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_TypeAliasForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.7-8
else:
class _ConcatenateForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeGuardForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.7-3.8
else:
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# Vendored from cpython typing._SpecialFrom
class _SpecialForm(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"):
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"):
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"):
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else:
class _RequiredForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
class _UnpackSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@_UnpackSpecialForm
def Unpack(self, parameters):
"""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else:
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
class _UnpackForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default Parameter - PEP 696
class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
"""Type variable tuple."""
def __init__(self, name, *, default=None):
super().__init__(name)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
else:
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=None):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
def reveal_type(__obj: T) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
return __obj
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
def assert_never(__arg: Never) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
raise AssertionError("Expected code to be unreachable")
if hasattr(typing, 'dataclass_transform'):
dataclass_transform = typing.dataclass_transform
else:
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
if hasattr(typing, "override"):
override = typing.override
else:
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(__arg: _F) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None: ...
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
See PEP 698 for details.
"""
return __arg
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
# Backport typing.NamedTuple as it exists in Python 3.11.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
def _caller():
try:
return sys._getframe(2).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _make_nmtuple(name, types, module, defaults=()):
fields = [n for n, t in types]
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
# The `_field_types` attribute was removed in 3.9;
# in earlier versions, it is the same as the `__annotations__` attribute
if sys.version_info < (3, 9):
nm_tpl._field_types = annotations
return nm_tpl
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert _NamedTuple in bases
for base in bases:
if base is not _NamedTuple and base is not typing.Generic:
raise TypeError(
'can only inherit from a NamedTuple type and Generic')
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(
typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__']
)
nm_tpl.__bases__ = bases
if typing.Generic in bases:
class_getitem = typing.Generic.__class_getitem__.__func__
nm_tpl.__class_getitem__ = classmethod(class_getitem)
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited_namedtuple_fields:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
if typing.Generic in bases:
nm_tpl.__init_subclass__()
return nm_tpl
def NamedTuple(__typename, __fields=None, **kwargs):
if __fields is None:
__fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(__typename, __fields, module=_caller())
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
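# Illustrative usage sketch (assumes ``from typing import Generic, TypeVar``;
# names are hypothetical): the backport accepts both the class and functional
# forms and, unlike the stdlib before 3.11, tolerates a Generic base.
#
#     T = TypeVar("T")
#
#     class Pair(NamedTuple, Generic[T]):
#         first: T
#         second: T
#
#     Point = NamedTuple("Point", [("x", int), ("y", int)])
#     assert Point(1, 2).x == 1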
| {
"content_hash": "1cafc3c9e0f2a1aefa053401f4e29749",
"timestamp": "",
"source": "github",
"line_count": 2209,
"max_line_length": 90,
"avg_line_length": 36.250792213671346,
"alnum_prop": 0.5626139513973876,
"repo_name": "paolodedios/pybuilder",
"id": "ef42417c208e93c55d704728d3e88dfe46250d92",
"size": "80078",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/_vendor/typing_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1567"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "Perl",
"bytes": "4025"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "2699121"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
} |
"""
Django settings for santicms project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0d&)bq@n98=*k^438m6ifkam9o#8s+-cmvk!a6*lku76w)#c#+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'mycms.apps.MycmsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'santicms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'santicms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
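# Illustrative hardening sketch (not generated by startproject; the environment
# variable names below are hypothetical): the SECURITY WARNINGs above are
# usually addressed by reading these values from the environment in production.
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#     DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#     ALLOWED_HOSTS = [h for h in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]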
| {
"content_hash": "9a8e4d9bf28915a6e78799edde56091c",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 91,
"avg_line_length": 27.879310344827587,
"alnum_prop": 0.6914038342609771,
"repo_name": "daimon99/santicms",
"id": "cdaabda4c863f7f3cbbc68b90633caaa2e11af16",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/santicms/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "287"
},
{
"name": "CSS",
"bytes": "239"
},
{
"name": "HTML",
"bytes": "5191"
},
{
"name": "Python",
"bytes": "24220"
}
],
"symlink_target": ""
} |
"""
auto rule template
~~~~
:author: LoRexxar <[email protected]>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from utils.api import *
class CVI_1001():
"""
rule class
"""
def __init__(self):
self.svid = 1001
self.language = "php"
self.author = "LoRexxar/wufeifei"
self.vulnerability = "SSRF"
        self.description = "The URL argument of a cURL call is controllable, which may lead to an SSRF vulnerability."
self.level = 7
# status
self.status = True
# 部分配置
self.match_mode = "vustomize-match"
self.match = r"curl_setopt\s*\(.*,\s*CURLOPT_URL\s*,(.*)\)"
# for solidity
self.match_name = None
self.black_list = None
# for chrome ext
self.keyword = None
# for regex
self.unmatch = None
self.vul_function = "curl_setopt"
def main(self, regex_string):
"""
regex string input
just for curl
:return:
"""
sql_sen = regex_string[0]
reg = "\$[\w+\->]*"
if re.search(reg, sql_sen, re.I):
p = re.compile(reg)
match = p.findall(sql_sen)
return match
return None
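# Illustrative usage sketch (the sample PHP snippet is hypothetical), showing
# how the two regexes cooperate on a typical sink:
#
#     rule = CVI_1001()
#     groups = re.findall(rule.match, "curl_setopt($ch, CURLOPT_URL, $url);")
#     # groups == [' $url']
#     rule.main(groups)   # -> ['$url'], the controllable variable it reports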
| {
"content_hash": "31ec76d85c7398d58108aa16785afef0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 67,
"avg_line_length": 22.389830508474578,
"alnum_prop": 0.5253595760787282,
"repo_name": "LoRexxar/Cobra-W",
"id": "a5a09dc92af8d2bb4f8d6c2cdf0092b08bb6aed1",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rules/php/CVI_1001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "204"
},
{
"name": "Hack",
"bytes": "82"
},
{
"name": "Java",
"bytes": "45"
},
{
"name": "PHP",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "441482"
}
],
"symlink_target": ""
} |
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
from dragon.engine.clients import Clients
from dragon.openstack.common import log as logging
from dragon.workload_policy.actions import action
from dragon.workload_policy.actions import action_execution as ae
from oslo.config import cfg
from eventlet import greenthread
from dragon.template.heat_template import HeatVolumeResource
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeSnapshotAction(action.Action):
is_global = False
def __init__(self, context):
self.clients = Clients(context)
self._name = None
self._id = None
self._backup_id = None
self._resource_id = None
# super(action.Action, self).__init__(workload_action_excution_id)
def protect(self, cntx, workload_action_excution_id, resource_id,
container_name):
volume = self.clients.cinder().volumes.get(resource_id)
self._name = volume.name
self._id = volume.id
self._resource_id = resource_id
volume_snapshot_exection =\
ae.ActionExecution(workload_action_excution_id,
resource_id, self.id)
result = self._replicate_volume_to_DR(cntx, volume, container_name,
volume_snapshot_exection)
return result
def generate_template(self, context, template_gen):
resource = HeatVolumeResource(self._name, self._id, self._backup_id)
template_gen.add_volume(resource)
def failover(self, context, resource_id, resource_data, container_name):
return self._restore_volumes_from_swift(context, resource_id,
resource_data,
container_name)
def _restore_volumes_from_swift(self, context, resource_id,
resource_data, container_name):
success = False
cinder_client = self.clients.cinder()
dr_backup = cinder_client.backups.import_record(
resource_data['backup_service'],
resource_data['backup_url'])
dr_backup_id = dr_backup['id']
temp_dr_backup = cinder_client.backups.get(dr_backup_id)
LOG.debug("cinder backup status %s" % temp_dr_backup.status)
while temp_dr_backup.status == "creating":
greenthread.sleep(1)
temp_dr_backup = cinder_client.backups.get(dr_backup_id)
if temp_dr_backup.status == "available":
# volume_snapshot_exection.set_status(context, 'ready')
success = True
LOG.debug("cinder backup status %s" % temp_dr_backup)
self._name = temp_dr_backup.name
self._id = temp_dr_backup.volume_id # Remove this field!
self._backup_id = dr_backup_id
return success
def _replicate_volume_to_DR(self, context, volume, container_name,
action_excution):
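        """Clone the volume, back the clone up into the given Swift container,
        export the backup record for the DR site and clean up the temporary
        clone; returns the DR state string and the exported backup record."""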
metadata = volume.metadata
c_client = self.clients.cinder()
LOG.debug("cloning volume %s" % (volume.id))
clone_volume = c_client.volumes.create(volume.size,
source_volid=volume.id)
clone_metadata = clone_volume.metadata
action_excution.set_status(context, 'cloning')
LOG.debug("clone_volume.id %s" % (clone_volume.id))
temp_vol = c_client.volumes.get(clone_volume.id)
#LOG.debug("temp_vol.status %s" % (temp_vol.status))
backup_rec = None
while (temp_vol.status == "creating"):
greenthread.sleep(1)
temp_vol = c_client.volumes.get(clone_volume.id)
#LOG.debug("temp_vol.status %s" % (temp_vol.status))
if temp_vol.status == "available":
LOG.debug("creating backup %s" % (clone_volume.id))
backup_store = c_client.backups.create(clone_volume.id,
container=container_name,
name=volume.name)
action_excution.set_status(context, 'backup')
temp_back = c_client.backups.get(backup_store.id)
self._backup_id = backup_store.id
#LOG.debug("temp_back.status %s" % (temp_back.status))
while temp_back.status == "creating":
greenthread.sleep(1)
temp_back = c_client.backups.get(backup_store.id)
#LOG.debug("temp_back.status %s" % (temp_back.status))
if temp_back.status == "available":
metadata['clone_backup_id'] = backup_store.id
LOG.debug("exporting backup %s" % (backup_store.id))
backup_rec = c_client.backups.export_record(backup_store.id)
dr_state = "Protected"
# TODO(Oshrit): Cleanup after exported to Swift
                # cleanup of backup record after export finished
self._cleanup(context, c_client, clone_volume.id,
backup_store.id)
else:
dr_state = 'DR clone backup failed'
else:
dr_state = 'DR clone failed'
action_excution.set_status(context, dr_state)
LOG.debug("dr_state %s" % (dr_state))
return dr_state, backup_rec
def _cleanup(self, context, client, snapshot_id, backup_id):
client.volumes.delete(snapshot_id)
# client.backups.delete(backup_id)
| {
"content_hash": "75271a80e60d675839eaf9de1ccf98c5",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 76,
"avg_line_length": 44.471830985915496,
"alnum_prop": 0.5703879651623119,
"repo_name": "os-cloud-storage/openstack-workload-disaster-recovery",
"id": "2d586cd4c74c119ee527ab5bb6be38f8d2e247ae",
"size": "6360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragon/workload_policy/actions/plugins/volume_snapshot_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4930"
},
{
"name": "Python",
"bytes": "758400"
},
{
"name": "Shell",
"bytes": "24692"
}
],
"symlink_target": ""
} |
"""
Tests for pika.connection.Connection
"""
# Suppress pylint warnings concerning access to protected member
# pylint: disable=W0212
# Suppress pylint messages concerning missing docstrings
# pylint: disable=C0111
# Suppress pylint messages concerning invalid method name
# pylint: disable=C0103
try:
import mock
except ImportError:
from unittest import mock # pylint: disable=E0611
import random
import platform
try:
import unittest2 as unittest
except ImportError:
import unittest
import pika
from pika import connection
from pika import channel
from pika import credentials
from pika import exceptions
from pika import frame
from pika import spec
from pika.compat import xrange
def callback_method():
"""Callback method to use in tests"""
pass
class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
def setUp(self):
class ChannelTemplate(channel.Channel):
channel_number = None
with mock.patch('pika.connection.Connection.connect'):
self.connection = connection.Connection()
self.connection._set_connection_state(
connection.Connection.CONNECTION_OPEN)
self.channel = mock.Mock(spec=ChannelTemplate)
self.channel.channel_number = 1
self.channel.is_open = True
self.channel.is_closing = False
self.channel.is_closed = False
self.connection._channels[self.channel.channel_number] = self.channel
def tearDown(self):
del self.connection
del self.channel
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_calls_on_close_ready_when_no_channels(
self,
on_close_ready_mock):
self.connection._channels = dict()
self.connection.close()
self.assertTrue(on_close_ready_mock.called,
'on_close_ready_mock should have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_closes_open_channels(self, on_close_ready):
self.connection.close()
self.channel.close.assert_called_once_with(200, 'Normal shutdown')
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_closes_opening_channels(self, on_close_ready):
self.channel.is_open = False
self.channel.is_closing = False
self.channel.is_closed = False
self.connection.close()
self.channel.close.assert_called_once_with(200, 'Normal shutdown')
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_does_not_close_closing_channels(self, on_close_ready):
self.channel.is_open = False
self.channel.is_closing = True
self.channel.is_closed = False
self.connection.close()
self.assertFalse(self.channel.close.called)
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._close_channels')
def test_close_bails_out_if_already_closed_or_closing(
self, close_channels):
for closed_state in (self.connection.CONNECTION_CLOSED,
self.connection.CONNECTION_CLOSING):
self.connection.connection_state = closed_state
self.connection.close()
self.assertFalse(self.channel.close.called)
self.assertEqual(self.connection.connection_state, closed_state)
@mock.patch('logging.Logger.critical')
def test_deliver_frame_to_channel_with_frame_for_unknown_channel(
self,
critical_mock):
unknown_channel_num = 99
self.assertNotIn(unknown_channel_num, self.connection._channels)
unexpected_frame = frame.Method(unknown_channel_num, mock.Mock())
self.connection._deliver_frame_to_channel(unexpected_frame)
critical_mock.assert_called_once_with(
'Received %s frame for unregistered channel %i on %s',
unexpected_frame.NAME, unknown_channel_num, self.connection)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_with_closing_channels(self, on_close_ready):
"""if connection is closing but closing channels remain, do not call \
_on_close_ready
"""
self.channel.is_open = False
self.channel.is_closing = True
self.channel.is_closed = False
self.connection.close()
self.assertFalse(on_close_ready.called,
'_on_close_ready should not have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready(
self,
on_close_ready_mock):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
self.connection._on_channel_cleanup(self.channel)
self.assertTrue(on_close_ready_mock.called,
'_on_close_ready should have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready(
self,
on_close_ready_mock):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
channel_mock = mock.Mock(channel_number=99, is_closing=True)
self.connection._channels[99] = channel_mock
self.connection._on_channel_cleanup(self.channel)
self.assertFalse(on_close_ready_mock.called,
'_on_close_ready should not have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_non_closing_state(self, on_close_ready):
"""if connection isn't closing _on_close_ready should not be called"""
self.connection._on_channel_cleanup(mock.Mock())
self.assertFalse(on_close_ready.called,
'_on_close_ready should not have been called')
def test_on_terminate_cleans_up(self):
"""_on_terminate cleans up heartbeat, adapter, and channels"""
heartbeat = mock.Mock()
self.connection.heartbeat = heartbeat
self.connection._adapter_disconnect = mock.Mock()
self.connection._on_terminate(-1, 'Undefined')
heartbeat.stop.assert_called_once_with()
self.connection._adapter_disconnect.assert_called_once_with()
self.channel._on_close_meta.assert_called_once_with(-1, 'Undefined')
self.assertTrue(self.connection.is_closed)
def test_on_terminate_invokes_connection_closed_callback(self):
"""_on_terminate invokes `Connection.ON_CONNECTION_CLOSED` callbacks"""
self.connection.callbacks.process = mock.Mock(
wraps=self.connection.callbacks.process)
self.connection._adapter_disconnect = mock.Mock()
self.connection._on_terminate(1, 'error text')
self.connection.callbacks.process.assert_called_once_with(
0, self.connection.ON_CONNECTION_CLOSED,
self.connection, self.connection,
1, 'error text')
with self.assertRaises(AssertionError):
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR,
self.connection, self.connection,
mock.ANY)
def test_on_terminate_invokes_protocol_on_connection_error_and_closed(self):
"""_on_terminate invokes `ON_CONNECTION_ERROR` with \
`IncompatibleProtocolError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_PROTOCOL)
self.connection._on_terminate(1, 'error text')
self.assertEqual(self.connection.callbacks.process.call_count, 2)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR,
self.connection, self.connection,
mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
self.assertIs(type(conn_exc), exceptions.IncompatibleProtocolError)
self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_CLOSED,
self.connection, self.connection,
1, 'error text')
def test_on_terminate_invokes_auth_on_connection_error_and_closed(self):
"""_on_terminate invokes `ON_CONNECTION_ERROR` with \
`ProbableAuthenticationError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_START)
self.connection._on_terminate(1, 'error text')
self.assertEqual(self.connection.callbacks.process.call_count, 2)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR,
self.connection, self.connection,
mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
self.assertIs(type(conn_exc),
exceptions.ProbableAuthenticationError)
self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_CLOSED,
self.connection, self.connection,
1, 'error text')
def test_on_terminate_invokes_access_denied_on_connection_error_and_closed(
self):
"""_on_terminate invokes `ON_CONNECTION_ERROR` with \
`ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_TUNE)
self.connection._on_terminate(1, 'error text')
self.assertEqual(self.connection.callbacks.process.call_count, 2)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR,
self.connection, self.connection,
mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][4]
self.assertIs(type(conn_exc), exceptions.ProbableAccessDeniedError)
self.assertSequenceEqual(conn_exc.args, [1, 'error text'])
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_CLOSED,
self.connection, self.connection,
1, 'error text')
@mock.patch('pika.connection.Connection.connect')
def test_new_conn_should_use_first_channel(self, connect):
"""_next_channel_number in new conn should always be 1"""
conn = connection.Connection()
self.assertEqual(1, conn._next_channel_number())
def test_next_channel_number_returns_lowest_unused(self):
"""_next_channel_number must return lowest available channel number"""
for channel_num in xrange(1, 50):
self.connection._channels[channel_num] = True
expectation = random.randint(5, 49)
del self.connection._channels[expectation]
self.assertEqual(self.connection._next_channel_number(), expectation)
def test_add_callbacks(self):
"""make sure the callback adding works"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
for test_method, expected_key in (
(self.connection.add_backpressure_callback,
self.connection.ON_CONNECTION_BACKPRESSURE),
(self.connection.add_on_open_callback,
self.connection.ON_CONNECTION_OPEN),
(self.connection.add_on_close_callback,
self.connection.ON_CONNECTION_CLOSED)):
self.connection.callbacks.reset_mock()
test_method(callback_method)
self.connection.callbacks.add.assert_called_once_with(
0, expected_key, callback_method, False)
def test_add_on_close_callback(self):
"""make sure the add on close callback is added"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
self.connection.add_on_open_callback(callback_method)
self.connection.callbacks.add.assert_called_once_with(
0, self.connection.ON_CONNECTION_OPEN, callback_method, False)
def test_add_on_open_error_callback(self):
"""make sure the add on open error callback is added"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
#Test with remove default first (also checks default is True)
self.connection.add_on_open_error_callback(callback_method)
self.connection.callbacks.remove.assert_called_once_with(
0, self.connection.ON_CONNECTION_ERROR,
self.connection._on_connection_error)
self.connection.callbacks.add.assert_called_once_with(
0, self.connection.ON_CONNECTION_ERROR, callback_method, False)
def test_channel(self):
"""test the channel method"""
self.connection._next_channel_number = mock.Mock(return_value=42)
test_channel = mock.Mock(spec=channel.Channel)
self.connection._create_channel = mock.Mock(return_value=test_channel)
self.connection._add_channel_callbacks = mock.Mock()
ret_channel = self.connection.channel(callback_method)
self.assertEqual(test_channel, ret_channel)
self.connection._create_channel.assert_called_once_with(42,
callback_method)
self.connection._add_channel_callbacks.assert_called_once_with(42)
test_channel.open.assert_called_once_with()
def test_channel_on_closed_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_CLOSED
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
def test_channel_on_closing_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
def test_channel_on_init_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_INIT
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
def test_channel_on_start_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_START
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
def test_channel_on_protocol_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_PROTOCOL
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
def test_channel_on_tune_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_TUNE
with self.assertRaises(exceptions.ConnectionClosed):
self.connection.channel(lambda *args: None)
@mock.patch('pika.frame.ProtocolHeader')
def test_connect(self, frame_protocol_header):
"""make sure the connect method sets the state and sends a frame"""
self.connection._adapter_connect = mock.Mock(return_value=None)
self.connection._send_frame = mock.Mock()
frame_protocol_header.spec = frame.ProtocolHeader
frame_protocol_header.return_value = 'frame object'
self.connection.connect()
self.assertEqual(self.connection.CONNECTION_PROTOCOL,
self.connection.connection_state)
self.connection._send_frame.assert_called_once_with('frame object')
def test_connect_reconnect(self):
"""try the different reconnect logic, check state & other class vars"""
self.connection._adapter_connect = mock.Mock(return_value='error')
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
self.connection.remaining_connection_attempts = 2
self.connection.params.retry_delay = 555
self.connection.params.connection_attempts = 99
self.connection.add_timeout = mock.Mock()
#first failure
self.connection.connect()
self.connection.add_timeout.assert_called_once_with(
555, self.connection.connect)
self.assertEqual(1, self.connection.remaining_connection_attempts)
self.assertFalse(self.connection.callbacks.process.called)
self.assertEqual(self.connection.CONNECTION_INIT,
self.connection.connection_state)
#fail with no attempts remaining
self.connection.add_timeout.reset_mock()
self.connection.connect()
self.assertFalse(self.connection.add_timeout.called)
self.assertEqual(99, self.connection.remaining_connection_attempts)
self.connection.callbacks.process.assert_called_once_with(
0, self.connection.ON_CONNECTION_ERROR, self.connection,
self.connection, 'error')
self.assertEqual(self.connection.CONNECTION_CLOSED,
self.connection.connection_state)
def test_client_properties(self):
"""make sure client properties has some important keys"""
client_props = self.connection._client_properties
self.assertTrue(isinstance(client_props, dict))
for required_key in ('product', 'platform', 'capabilities',
'information', 'version'):
self.assertTrue(required_key in client_props,
'%s missing' % required_key)
def test_client_properties_default(self):
expectation = {
'product': connection.PRODUCT,
'platform': 'Python %s' % platform.python_version(),
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
},
'information': 'See http://pika.rtfd.org',
'version': pika.__version__
}
self.assertDictEqual(self.connection._client_properties, expectation)
def test_client_properties_override(self):
expectation = {
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
}
}
override = {'product': 'My Product',
'platform': 'Your platform',
'version': '0.1',
'information': 'this is my app'}
expectation.update(override)
params = connection.ConnectionParameters(client_properties=override)
with mock.patch('pika.connection.Connection.connect'):
conn = connection.Connection(params)
self.assertDictEqual(conn._client_properties, expectation)
def test_set_backpressure_multiplier(self):
"""test setting the backpressure multiplier"""
self.connection._backpressure_multiplier = None
self.connection.set_backpressure_multiplier(value=5)
self.assertEqual(5, self.connection._backpressure_multiplier)
def test_close_channels(self):
"""test closing all channels"""
self.connection.connection_state = self.connection.CONNECTION_OPEN
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
opening_channel = mock.Mock(is_open=False,
is_closed=False,
is_closing=False)
open_channel = mock.Mock(is_open=True,
is_closed=False,
is_closing=False)
closing_channel = mock.Mock(is_open=False,
is_closed=False,
is_closing=True)
self.connection._channels = {
'openingc': opening_channel,
'openc': open_channel,
'closingc': closing_channel}
self.connection._close_channels(400, 'reply text')
opening_channel.close.assert_called_once_with(400, 'reply text')
open_channel.close.assert_called_once_with(400, 'reply text')
self.assertFalse(closing_channel.close.called)
self.assertTrue('openingc' in self.connection._channels)
self.assertTrue('openc' in self.connection._channels)
self.assertTrue('closingc' in self.connection._channels)
self.assertFalse(self.connection.callbacks.cleanup.called)
# Test on closed connection
self.connection.connection_state = self.connection.CONNECTION_CLOSED
with self.assertRaises(AssertionError):
self.connection._close_channels(200, 'reply text')
def test_on_connection_start(self):
"""make sure starting a connection sets the correct class vars"""
method_frame = mock.Mock()
method_frame.method = mock.Mock()
method_frame.method.mechanisms = str(credentials.PlainCredentials.TYPE)
method_frame.method.version_major = 0
method_frame.method.version_minor = 9
#This may be incorrectly mocked, or the code is wrong
#TODO: Code does hasattr check, should this be a has_key/in check?
method_frame.method.server_properties = {
'capabilities': {
'basic.nack': True,
'consumer_cancel_notify': False,
'exchange_exchange_bindings': False
}
}
        #This will be called, but should not be implemented here, just mock it
self.connection._flush_outbound = mock.Mock()
self.connection._on_connection_start(method_frame)
self.assertEqual(True, self.connection.basic_nack)
self.assertEqual(False, self.connection.consumer_cancel_notify)
self.assertEqual(False, self.connection.exchange_exchange_bindings)
self.assertEqual(False, self.connection.publisher_confirms)
@mock.patch('pika.heartbeat.HeartbeatChecker')
@mock.patch('pika.frame.Method')
def test_on_connection_tune(self, method, heartbeat_checker):
"""make sure on connection tune turns the connection params"""
        heartbeat_checker.return_value = 'heartbeat obj'
self.connection._flush_outbound = mock.Mock()
marshal = mock.Mock(return_value='ab')
method.return_value = mock.Mock(marshal=marshal)
        #may be good to test this here, but I don't want to test too much
self.connection._rpc = mock.Mock()
method_frame = mock.Mock()
method_frame.method = mock.Mock()
method_frame.method.channel_max = 40
method_frame.method.frame_max = 10000
method_frame.method.heartbeat = 10
self.connection.params.channel_max = 20
self.connection.params.frame_max = 20000
self.connection.params.heartbeat = 20
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(self.connection.CONNECTION_TUNE,
self.connection.connection_state)
self.assertEqual(20, self.connection.params.channel_max)
self.assertEqual(10000, self.connection.params.frame_max)
self.assertEqual(20, self.connection.params.heartbeat)
self.assertEqual(9992, self.connection._body_max_length)
heartbeat_checker.assert_called_once_with(self.connection, 20)
self.assertEqual(['ab'], list(self.connection.outbound_buffer))
        self.assertEqual('heartbeat obj', self.connection.heartbeat)
# Repeat with smaller user heartbeat than broker
method_frame.method.heartbeat = 60
self.connection.params.heartbeat = 20
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(60, self.connection.params.heartbeat)
# Repeat with user deferring to server's heartbeat timeout
method_frame.method.heartbeat = 500
self.connection.params.heartbeat = None
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(500, self.connection.params.heartbeat)
# Repeat with user deferring to server's disabled heartbeat value
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = None
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(0, self.connection.params.heartbeat)
# Repeat with user-disabled heartbeat
method_frame.method.heartbeat = 60
self.connection.params.heartbeat = 0
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(0, self.connection.params.heartbeat)
# Repeat with server-disabled heartbeat
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = 60
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(0, self.connection.params.heartbeat)
# Repeat with both user/server disabled heartbeats
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = 0
#Test
self.connection._on_connection_tune(method_frame)
        #verify
self.assertEqual(0, self.connection.params.heartbeat)
def test_on_connection_closed(self):
"""make sure connection close sends correct frames"""
method_frame = mock.Mock()
method_frame.method = mock.Mock(spec=spec.Connection.Close)
method_frame.method.reply_code = 1
method_frame.method.reply_text = 'hello'
self.connection._on_terminate = mock.Mock()
self.connection._on_connection_close(method_frame)
#Check
self.connection._on_terminate.assert_called_once_with(1, 'hello')
def test_on_connection_close_ok(self):
"""make sure _on_connection_close_ok terminates connection"""
method_frame = mock.Mock()
method_frame.method = mock.Mock(spec=spec.Connection.CloseOk)
self.connection.closing = (1, 'bye')
self.connection._on_terminate = mock.Mock()
self.connection._on_connection_close_ok(method_frame)
#Check
self.connection._on_terminate.assert_called_once_with(1, 'bye')
@mock.patch('pika.frame.decode_frame')
def test_on_data_available(self, decode_frame):
"""test on data available and process frame"""
data_in = ['data']
self.connection._frame_buffer = ['old_data']
for frame_type in (frame.Method, spec.Basic.Deliver, frame.Heartbeat):
frame_value = mock.Mock(spec=frame_type)
frame_value.frame_type = 2
frame_value.method = 2
frame_value.channel_number = 1
self.connection.bytes_received = 0
self.connection.heartbeat = mock.Mock()
self.connection.frames_received = 0
decode_frame.return_value = (2, frame_value)
self.connection._on_data_available(data_in)
#test value
self.assertListEqual([], self.connection._frame_buffer)
self.assertEqual(2, self.connection.bytes_received)
self.assertEqual(1, self.connection.frames_received)
if frame_type == frame.Heartbeat:
self.assertTrue(self.connection.heartbeat.received.called)
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
@mock.patch.object(connection.Connection,
'add_on_connection_blocked_callback')
@mock.patch.object(connection.Connection,
'add_on_connection_unblocked_callback')
def test_create_with_blocked_connection_timeout_config(
self,
add_on_unblocked_callback_mock,
add_on_blocked_callback_mock,
connect_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Check
conn.add_on_connection_blocked_callback.assert_called_once_with(
conn._on_connection_blocked)
conn.add_on_connection_unblocked_callback.assert_called_once_with(
conn._on_connection_unblocked)
@mock.patch.object(connection.Connection, 'add_timeout')
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
def test_connection_blocked_sets_timer(
self,
connect_mock,
add_timeout_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
conn.add_timeout.assert_called_once_with(
60,
conn._on_blocked_connection_timeout)
self.assertIsNotNone(conn._blocked_conn_timer)
@mock.patch.object(connection.Connection, 'add_timeout')
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
def test_multi_connection_blocked_in_a_row_sets_timer_once(
self,
connect_mock,
add_timeout_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Simulate Connection.Blocked trigger
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
conn.add_timeout.assert_called_once_with(
60,
conn._on_blocked_connection_timeout)
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
# Simulate Connection.Blocked trigger again
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertEqual(conn.add_timeout.call_count, 1)
self.assertIs(conn._blocked_conn_timer, timer)
@mock.patch.object(connection.Connection, '_on_terminate')
@mock.patch.object(connection.Connection, 'add_timeout',
spec_set=connection.Connection.add_timeout)
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
    def test_blocked_connection_timeout_terminates_connection(
self,
connect_mock,
add_timeout_mock,
on_terminate_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
conn._on_blocked_connection_timeout()
# Check
conn._on_terminate.assert_called_once_with(
connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT,
'Blocked connection timeout expired')
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(connection.Connection, 'remove_timeout')
@mock.patch.object(connection.Connection, 'add_timeout',
spec_set=connection.Connection.add_timeout)
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
def test_connection_unblocked_removes_timer(
self,
connect_mock,
add_timeout_mock,
remove_timeout_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
conn._on_connection_unblocked(
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
conn.remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(connection.Connection, 'remove_timeout')
@mock.patch.object(connection.Connection, 'add_timeout',
spec_set=connection.Connection.add_timeout)
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
def test_multi_connection_unblocked_in_a_row_removes_timer_once(
self,
connect_mock,
add_timeout_mock,
remove_timeout_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Simulate Connection.Blocked
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
# Simulate Connection.Unblocked
conn._on_connection_unblocked(
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
conn.remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
# Simulate Connection.Unblocked again
conn._on_connection_unblocked(
mock.Mock(name='frame.Method(Connection.Unblocked)'))
self.assertEqual(conn.remove_timeout.call_count, 1)
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(connection.Connection, 'remove_timeout')
@mock.patch.object(connection.Connection, 'add_timeout',
spec_set=connection.Connection.add_timeout)
@mock.patch.object(connection.Connection, 'connect',
spec_set=connection.Connection.connect)
@mock.patch.object(connection.Connection, '_adapter_disconnect',
spec_set=connection.Connection._adapter_disconnect)
def test_on_terminate_removes_timer(
self,
adapter_disconnect_mock,
connect_mock,
add_timeout_mock,
remove_timeout_mock):
conn = connection.Connection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
conn._on_terminate(0, 'test_on_terminate_removes_timer')
# Check
conn.remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
| {
"content_hash": "5938bb725f3acbceec24a8c700577199",
"timestamp": "",
"source": "github",
"line_count": 859,
"max_line_length": 80,
"avg_line_length": 41.80209545983702,
"alnum_prop": 0.6443132449593405,
"repo_name": "Zephor5/pika",
"id": "0ffe78e667ce077c04a44d1de8f0d169cec71070",
"size": "35908",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/connection_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "842521"
}
],
"symlink_target": ""
} |
from django.db import models
# If ticket #1578 ever slips back in, these models will not be able to be
# created (the field names being lower-cased versions of their opposite
# classes is important here).
class First(models.Model):
second = models.IntegerField()
class Second(models.Model):
first = models.ForeignKey(First, related_name = 'the_first')
# Protect against repetition of #1839, #2415 and #2536.
class Third(models.Model):
name = models.CharField(maxlength=20)
third = models.ForeignKey('self', null=True, related_name='child_set')
class Parent(models.Model):
name = models.CharField(maxlength=20)
bestchild = models.ForeignKey('Child', null=True, related_name='favored_by')
class Child(models.Model):
name = models.CharField(maxlength=20)
parent = models.ForeignKey(Parent)
__test__ = {'API_TESTS':"""
>>> Third.AddManipulator().save(dict(id='3', name='An example', another=None))
<Third: Third object>
>>> parent = Parent(name = 'fred')
>>> parent.save()
>>> Child.AddManipulator().save(dict(name='bam-bam', parent=parent.id))
<Child: Child object>
"""}
| {
"content_hash": "c15ca447b3db5c2c831ff80365ac01af",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 80,
"avg_line_length": 32.61764705882353,
"alnum_prop": 0.7060414788097386,
"repo_name": "gabelula/b-counted",
"id": "8ddec98da44b04b95f2874c43357e54a3c9f79c3",
"size": "1109",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": ".google_appengine/lib/django/tests/regressiontests/many_to_one_regress/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "JavaScript",
"bytes": "166344"
},
{
"name": "Python",
"bytes": "7138909"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from rest_framework import permissions as drf_permissions
from api.base.utils import get_user_auth
from osf.models.action import ReviewAction
from osf.models.mixins import ReviewableMixin, ReviewProviderMixin
from osf.utils.workflows import DefaultTriggers
from osf.utils import permissions as osf_permissions
# Required permission to perform each action. `None` means no permissions required.
TRIGGER_PERMISSIONS = {
DefaultTriggers.SUBMIT.value: None,
DefaultTriggers.ACCEPT.value: 'accept_submissions',
DefaultTriggers.REJECT.value: 'reject_submissions',
DefaultTriggers.EDIT_COMMENT.value: 'edit_review_comments',
}
class ReviewActionPermission(drf_permissions.BasePermission):
def has_object_permission(self, request, view, obj):
auth = get_user_auth(request)
if auth.user is None:
return False
target = None
provider = None
if isinstance(obj, ReviewAction):
target = obj.target
provider = target.provider
elif isinstance(obj, ReviewableMixin):
target = obj
provider = target.provider
elif isinstance(obj, ReviewProviderMixin):
provider = obj
else:
raise ValueError('Not a reviews-related model: {}'.format(obj))
serializer = view.get_serializer()
if request.method in drf_permissions.SAFE_METHODS:
# Moderators and node contributors can view actions
is_node_contributor = target is not None and target.node.has_permission(auth.user, osf_permissions.READ)
return is_node_contributor or auth.user.has_perm('view_actions', provider)
else:
# Moderators and node admins can trigger state changes.
is_node_admin = target is not None and target.node.has_permission(auth.user, osf_permissions.ADMIN)
if not (is_node_admin or auth.user.has_perm('view_submissions', provider)):
return False
# User can trigger state changes on this reviewable, but can they use this trigger in particular?
serializer = view.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
trigger = serializer.validated_data.get('trigger')
permission = TRIGGER_PERMISSIONS[trigger]
return permission is None or request.user.has_perm(permission, target.provider)
| {
"content_hash": "7cafa46c7df0b65907c38963a24a7ab1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 116,
"avg_line_length": 43.80357142857143,
"alnum_prop": 0.6865063187933144,
"repo_name": "chennan47/osf.io",
"id": "0986b25f122d3a76f2d85fe6ad4c9d080423d2d5",
"size": "2477",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/actions/permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110839"
},
{
"name": "HTML",
"bytes": "236223"
},
{
"name": "JavaScript",
"bytes": "1830647"
},
{
"name": "Mako",
"bytes": "665098"
},
{
"name": "Python",
"bytes": "7650137"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
version = '0.2.1'
setup(
name='streamcat',
packages=find_packages(exclude=('tests', 'docs')),
version=version,
description='Encode and decode concatenated objects as streams',
long_description=open('README.rst', 'r').read(),
author='Bertrand Bonnefoy-Claudet',
author_email='[email protected]',
url='https://github.com/cryptosense/streamcat',
download_url='https://github.com/cryptosense/streamcat/tarball/v{}'.format(version),
keywords=['stream', 'file', 'json'],
license='BSD',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[],
)
| {
"content_hash": "ea8569d34777a4220934c5482b207332",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 38.36666666666667,
"alnum_prop": 0.63249348392702,
"repo_name": "cryptosense/streamcat",
"id": "021664b3652836fb6f66b93dac9de811a8db1818",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5815"
}
],
"symlink_target": ""
} |
"""
Implementation of insertion sort
"""
from random import randint
def insert(arr, pos, value):
"""
inserts the value at its place in the subarray arr[0:pos-1]
"""
idx = pos - 1
    while idx >= 0 and arr[idx] > value:
arr[idx+1] = arr[idx]
idx -= 1
arr[idx + 1] = value
def insertionSort(arr, left, right):
"""
sorts the sub-array arr[left:right] using insertion sort in ascending order
"""
for idx in range(left+1, right+1):
insert(arr, idx, arr[idx])
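# Insertion sort is in-place and stable: it performs O(n^2) comparisons and
# shifts in the average and worst case, but only O(n) work when the input is
# already (nearly) sorted.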
def testInsertionSort():
for testCount in range(20):
length = randint(0, 25)
arr = [randint(-100, 100) for e in range(length)]
print "Array before sorting:", arr
insertionSort(arr, 0, length-1)
print "Array after sorting:", arr
print "-------------------------------------------------------"
testInsertionSort()
| {
"content_hash": "736af1d20fa1aed09c4b3988b99edaf3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.56045197740113,
"repo_name": "apurushottam/Algorithms",
"id": "17b07a190f1e1c5ebb7c8d03412662e80cfc8c7d",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2-Sorting/insertionSort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8399"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import re
import inspect
import discord
from .errors import BadArgument, NoPrivateMessage
__all__ = (
'Converter',
'MemberConverter',
'UserConverter',
'MessageConverter',
'TextChannelConverter',
'InviteConverter',
'RoleConverter',
'GameConverter',
'ColourConverter',
'VoiceChannelConverter',
'EmojiConverter',
'PartialEmojiConverter',
'CategoryChannelConverter',
'IDConverter',
'clean_content',
'Greedy',
)
def _get_from_guilds(bot, getter, argument):
result = None
for guild in bot.guilds:
result = getattr(guild, getter)(argument)
if result:
return result
return result
_utils_get = discord.utils.get
class Converter:
"""The base class of custom converters that require the :class:`.Context`
to be passed to be useful.
This allows you to implement converters that function similar to the
special cased ``discord`` classes.
Classes that derive from this should override the :meth:`~.Converter.convert`
method to do its conversion logic. This method must be a :ref:`coroutine <coroutine>`.
"""
async def convert(self, ctx, argument):
"""|coro|
The method to override to do conversion logic.
If an error is found while converting, it is recommended to
raise a :exc:`.CommandError` derived exception as it will
properly propagate to the error handlers.
Parameters
-----------
ctx: :class:`.Context`
The invocation context that the argument is being used in.
argument: :class:`str`
The argument that is being converted.
"""
raise NotImplementedError('Derived classes need to implement this.')
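# A minimal illustrative sketch of the override pattern described in the
# docstring above; the ``FractionConverter``, ``bot`` and ``halve`` names are
# assumptions and are not part of this module:
#
#     from fractions import Fraction
#
#     class FractionConverter(Converter):
#         async def convert(self, ctx, argument):
#             try:
#                 return Fraction(argument)
#             except ValueError:
#                 raise BadArgument('"{}" is not a valid fraction.'.format(argument))
#
#     @bot.command()
#     async def halve(ctx, value: FractionConverter):
#         await ctx.send(str(value / 2))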
class IDConverter(Converter):
def __init__(self):
self._id_regex = re.compile(r'([0-9]{15,21})$')
super().__init__()
def _get_id_match(self, argument):
return self._id_regex.match(argument)
class MemberConverter(IDConverter):
"""Converts to a :class:`~discord.Member`.
All lookups are via the local guild. If in a DM context, then the lookup
is done by the global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name#discrim
4. Lookup by name
5. Lookup by nickname
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r'<@!?([0-9]+)>$', argument)
guild = ctx.guild
result = None
if match is None:
# not a mention...
if guild:
result = guild.get_member_named(argument)
else:
result = _get_from_guilds(bot, 'get_member_named', argument)
else:
user_id = int(match.group(1))
if guild:
result = guild.get_member(user_id) or _utils_get(ctx.message.mentions, id=user_id)
else:
result = _get_from_guilds(bot, 'get_member', user_id)
if result is None:
raise BadArgument('Member "{}" not found'.format(argument))
return result
class UserConverter(IDConverter):
"""Converts to a :class:`~discord.User`.
All lookups are via the global user cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name#discrim
4. Lookup by name
"""
async def convert(self, ctx, argument):
match = self._get_id_match(argument) or re.match(r'<@!?([0-9]+)>$', argument)
result = None
state = ctx._state
if match is not None:
user_id = int(match.group(1))
result = ctx.bot.get_user(user_id) or _utils_get(ctx.message.mentions, id=user_id)
else:
arg = argument
# check for discriminator if it exists
if len(arg) > 5 and arg[-5] == '#':
discrim = arg[-4:]
name = arg[:-5]
predicate = lambda u: u.name == name and u.discriminator == discrim
result = discord.utils.find(predicate, state._users.values())
if result is not None:
return result
predicate = lambda u: u.name == arg
result = discord.utils.find(predicate, state._users.values())
if result is None:
raise BadArgument('User "{}" not found'.format(argument))
return result
class MessageConverter(Converter):
"""Converts to a :class:`discord.Message`.
.. versionadded:: 1.1.0
The lookup strategy is as follows (in order):
1. Lookup by "{channel ID}-{message ID}" (retrieved by shift-clicking on "Copy ID")
2. Lookup by message ID (the message **must** be in the context channel)
3. Lookup by message URL
"""
async def convert(self, ctx, argument):
id_regex = re.compile(r'^(?:(?P<channel_id>[0-9]{15,21})-)?(?P<message_id>[0-9]{15,21})$')
link_regex = re.compile(
r'^https?://(?:(ptb|canary)\.)?discordapp\.com/channels/'
r'(?:([0-9]{15,21})|(@me))'
r'/(?P<channel_id>[0-9]{15,21})/(?P<message_id>[0-9]{15,21})/?$'
)
match = id_regex.match(argument) or link_regex.match(argument)
if not match:
raise BadArgument('Message "{msg}" not found.'.format(msg=argument))
message_id = int(match.group("message_id"))
channel_id = match.group("channel_id")
message = ctx.bot._connection._get_message(message_id)
if message:
return message
channel = ctx.bot.get_channel(int(channel_id)) if channel_id else ctx.channel
if not channel:
raise BadArgument('Channel "{channel}" not found.'.format(channel=channel_id))
try:
return await channel.fetch_message(message_id)
except discord.NotFound:
raise BadArgument('Message "{msg}" not found.'.format(msg=argument))
except discord.Forbidden:
raise BadArgument("Can't read messages in {channel}".format(channel=channel.mention))
class TextChannelConverter(IDConverter):
"""Converts to a :class:`~discord.TextChannel`.
All lookups are via the local guild. If in a DM context, then the lookup
is done by the global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r'<#([0-9]+)>$', argument)
result = None
guild = ctx.guild
if match is None:
# not a mention
if guild:
result = discord.utils.get(guild.text_channels, name=argument)
else:
def check(c):
return isinstance(c, discord.TextChannel) and c.name == argument
result = discord.utils.find(check, bot.get_all_channels())
else:
channel_id = int(match.group(1))
if guild:
result = guild.get_channel(channel_id)
else:
result = _get_from_guilds(bot, 'get_channel', channel_id)
if not isinstance(result, discord.TextChannel):
raise BadArgument('Channel "{}" not found.'.format(argument))
return result
class VoiceChannelConverter(IDConverter):
"""Converts to a :class:`~discord.VoiceChannel`.
All lookups are via the local guild. If in a DM context, then the lookup
is done by the global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r'<#([0-9]+)>$', argument)
result = None
guild = ctx.guild
if match is None:
# not a mention
if guild:
result = discord.utils.get(guild.voice_channels, name=argument)
else:
def check(c):
return isinstance(c, discord.VoiceChannel) and c.name == argument
result = discord.utils.find(check, bot.get_all_channels())
else:
channel_id = int(match.group(1))
if guild:
result = guild.get_channel(channel_id)
else:
result = _get_from_guilds(bot, 'get_channel', channel_id)
if not isinstance(result, discord.VoiceChannel):
raise BadArgument('Channel "{}" not found.'.format(argument))
return result
class CategoryChannelConverter(IDConverter):
"""Converts to a :class:`~discord.CategoryChannel`.
All lookups are via the local guild. If in a DM context, then the lookup
is done by the global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r'<#([0-9]+)>$', argument)
result = None
guild = ctx.guild
if match is None:
# not a mention
if guild:
result = discord.utils.get(guild.categories, name=argument)
else:
def check(c):
return isinstance(c, discord.CategoryChannel) and c.name == argument
result = discord.utils.find(check, bot.get_all_channels())
else:
channel_id = int(match.group(1))
if guild:
result = guild.get_channel(channel_id)
else:
result = _get_from_guilds(bot, 'get_channel', channel_id)
if not isinstance(result, discord.CategoryChannel):
raise BadArgument('Channel "{}" not found.'.format(argument))
return result
class ColourConverter(Converter):
"""Converts to a :class:`~discord.Colour`.
The following formats are accepted:
- ``0x<hex>``
- ``#<hex>``
- ``0x#<hex>``
- Any of the ``classmethod`` in :class:`Colour`
- The ``_`` in the name can be optionally replaced with spaces.
"""
async def convert(self, ctx, argument):
arg = argument.replace('0x', '').lower()
if arg[0] == '#':
arg = arg[1:]
try:
value = int(arg, base=16)
if not (0 <= value <= 0xFFFFFF):
raise BadArgument('Colour "{}" is invalid.'.format(arg))
return discord.Colour(value=value)
except ValueError:
arg = arg.replace(' ', '_')
method = getattr(discord.Colour, arg, None)
if arg.startswith('from_') or method is None or not inspect.ismethod(method):
raise BadArgument('Colour "{}" is invalid.'.format(arg))
return method()
class RoleConverter(IDConverter):
"""Converts to a :class:`~discord.Role`.
All lookups are via the local guild. If in a DM context, then the lookup
is done by the global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name
"""
async def convert(self, ctx, argument):
guild = ctx.guild
if not guild:
raise NoPrivateMessage()
match = self._get_id_match(argument) or re.match(r'<@&([0-9]+)>$', argument)
if match:
result = guild.get_role(int(match.group(1)))
else:
result = discord.utils.get(guild._roles.values(), name=argument)
if result is None:
raise BadArgument('Role "{}" not found.'.format(argument))
return result
class GameConverter(Converter):
"""Converts to :class:`~discord.Game`."""
async def convert(self, ctx, argument):
return discord.Game(name=argument)
class InviteConverter(Converter):
"""Converts to a :class:`~discord.Invite`.
This is done via an HTTP request using :meth:`.Bot.fetch_invite`.
"""
async def convert(self, ctx, argument):
try:
invite = await ctx.bot.fetch_invite(argument)
return invite
except Exception as exc:
raise BadArgument('Invite is invalid or expired') from exc
class EmojiConverter(IDConverter):
"""Converts to a :class:`~discord.Emoji`.
All lookups are done for the local guild first, if available. If that lookup
fails, then it checks the client's global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by extracting ID from the emoji.
3. Lookup by name
"""
async def convert(self, ctx, argument):
match = self._get_id_match(argument) or re.match(r'<a?:[a-zA-Z0-9\_]+:([0-9]+)>$', argument)
result = None
bot = ctx.bot
guild = ctx.guild
if match is None:
# Try to get the emoji by name. Try local guild first.
if guild:
result = discord.utils.get(guild.emojis, name=argument)
if result is None:
result = discord.utils.get(bot.emojis, name=argument)
else:
emoji_id = int(match.group(1))
# Try to look up emoji by id.
if guild:
result = discord.utils.get(guild.emojis, id=emoji_id)
if result is None:
result = discord.utils.get(bot.emojis, id=emoji_id)
if result is None:
raise BadArgument('Emoji "{}" not found.'.format(argument))
return result
class PartialEmojiConverter(Converter):
"""Converts to a :class:`~discord.PartialEmoji`.
This is done by extracting the animated flag, name and ID from the emoji.
"""
async def convert(self, ctx, argument):
match = re.match(r'<(a?):([a-zA-Z0-9\_]+):([0-9]+)>$', argument)
if match:
emoji_animated = bool(match.group(1))
emoji_name = match.group(2)
emoji_id = int(match.group(3))
return discord.PartialEmoji.with_state(ctx.bot._connection, animated=emoji_animated, name=emoji_name,
id=emoji_id)
raise BadArgument('Couldn\'t convert "{}" to PartialEmoji.'.format(argument))
class clean_content(Converter):
"""Converts the argument to mention scrubbed version of
said content.
This behaves similarly to :attr:`~discord.Message.clean_content`.
Attributes
------------
fix_channel_mentions: :class:`bool`
Whether to clean channel mentions.
use_nicknames: :class:`bool`
Whether to use nicknames when transforming mentions.
escape_markdown: :class:`bool`
Whether to also escape special markdown characters.
"""
def __init__(self, *, fix_channel_mentions=False, use_nicknames=True, escape_markdown=False):
self.fix_channel_mentions = fix_channel_mentions
self.use_nicknames = use_nicknames
self.escape_markdown = escape_markdown
async def convert(self, ctx, argument):
message = ctx.message
transformations = {}
if self.fix_channel_mentions and ctx.guild:
def resolve_channel(id, *, _get=ctx.guild.get_channel):
ch = _get(id)
return ('<#%s>' % id), ('#' + ch.name if ch else '#deleted-channel')
transformations.update(resolve_channel(channel) for channel in message.raw_channel_mentions)
if self.use_nicknames and ctx.guild:
def resolve_member(id, *, _get=ctx.guild.get_member):
m = _get(id)
return '@' + m.display_name if m else '@deleted-user'
else:
def resolve_member(id, *, _get=ctx.bot.get_user):
m = _get(id)
return '@' + m.name if m else '@deleted-user'
transformations.update(
('<@%s>' % member_id, resolve_member(member_id))
for member_id in message.raw_mentions
)
transformations.update(
('<@!%s>' % member_id, resolve_member(member_id))
for member_id in message.raw_mentions
)
if ctx.guild:
def resolve_role(_id, *, _find=ctx.guild.get_role):
r = _find(_id)
return '@' + r.name if r else '@deleted-role'
transformations.update(
('<@&%s>' % role_id, resolve_role(role_id))
for role_id in message.raw_role_mentions
)
def repl(obj):
return transformations.get(obj.group(0), '')
pattern = re.compile('|'.join(transformations.keys()))
result = pattern.sub(repl, argument)
if self.escape_markdown:
result = discord.utils.escape_markdown(result)
# Completely ensure no mentions escape:
return discord.utils.escape_mentions(result)
class _Greedy:
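    """Internal marker behind the ``Greedy`` annotation: subscripting it
    (``Greedy[SomeConverter]``) stores the converter that the command parser
    applies greedily."""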
__slots__ = ('converter',)
def __init__(self, *, converter=None):
self.converter = converter
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if len(params) != 1:
raise TypeError('Greedy[...] only takes a single argument')
converter = params[0]
if not (callable(converter) or isinstance(converter, Converter) or hasattr(converter, '__origin__')):
raise TypeError('Greedy[...] expects a type or a Converter instance.')
if converter is str or converter is type(None) or converter is _Greedy:
raise TypeError('Greedy[%s] is invalid.' % converter.__name__)
return self.__class__(converter=converter)
Greedy = _Greedy()
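# Illustrative usage (a sketch; the ``ban`` command below is an assumption and
# not part of this module). ``Greedy[X]`` tells the parser to keep converting
# arguments with ``X`` until one fails and to pass the rest to later parameters:
#
#     @bot.command()
#     async def ban(ctx, members: Greedy[discord.Member], *, reason: str):
#         for member in members:
#             await member.ban(reason=reason)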
| {
"content_hash": "751f8bf64b3a6c019b3a67992d8d7943",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 113,
"avg_line_length": 34.09945750452079,
"alnum_prop": 0.5960651217054674,
"repo_name": "imayhaveborkedit/discord.py",
"id": "ea202b656b548f841d4dd07676a3bd7c63d9e526",
"size": "18882",
"binary": false,
"copies": "1",
"ref": "refs/heads/voice-recv-mk2",
"path": "discord/ext/commands/converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "995828"
}
],
"symlink_target": ""
} |
"""
Quickly load ROOT symbols without triggering PyROOT's finalSetup().
The main principle is that appropriate dictionaries first need to be loaded.
"""
from __future__ import absolute_import
import ROOT
from .. import log; log = log[__name__]
from .module_facade import Facade
__all__ = []
root_module = ROOT.module._root
if hasattr(root_module, 'LookupCppEntity'): # pragma: no cover
lookup_func = 'LookupCppEntity'
else: # pragma: no cover
lookup_func = 'LookupRootEntity'
# Quick's __name__ needs to be the ROOT module for this to be transparent.
# The below is one way of obtaining such a function
# First determine the ROOT version without triggering PyROOT's finalSetup()
Quick = eval('lambda symbol: module._root.{0}(symbol)'.format(lookup_func),
ROOT.__dict__)
_gSystem = Quick("gSystem")
Load = _gSystem.Load
# It is not vital to list _all_ symbols in here, just enough that a library
# will be loaded by the time it is needed.
SYMBOLS = dict(
Hist='TH1 TGraph TGraphAsymmErrors',
Tree='TCut TTree',
Gui='TPad TCanvas',
Graf='TLegend TLine TEllipse',
Physics='TVector2 TVector3 TLorentzVector TRotation TLorentzRotation',
Matrix='TMatrixT',
RooStats='RooStats RooMsgService',
RooFit='RooFit RooWorkspace',
)
# Mapping of symbols to libraries which need to be loaded
SYMBOLS_TO_LIB = dict(
(sym, lib) for lib, syms in SYMBOLS.items() for sym in syms.split())
# If you encounter problems with particular symbols, add them to this set.
SLOW = set("".split())
@Facade(__name__, expose_internal=False)
class QuickROOT(object):
def __getattr__(self, symbol):
if symbol in SLOW: # pragma: no cover
log.warning(
"Tried to quickly load {0} which is always slow".format(symbol))
lib = SYMBOLS_TO_LIB.get(symbol, None)
if lib:
# Load() doesn't cost anything if the library is already loaded
libname = "lib{0}".format(lib)
if libname not in _gSystem.GetLibraries():
regex = "^duplicate entry .* for level 0; ignored$"
with log["/ROOT.TEnvRec.ChangeValue"].ignore(regex):
if Load(libname) == 0:
log.debug("Loaded {0} (required by {1})".format(
libname, symbol))
elif lib == 'Gui':
# Possibly no X11 forwarding
log.debug("Unable to load {0} (required by {1}). "
"Putting ROOT in batch mode.".format(
libname, symbol))
ROOT.gROOT.SetBatch(True)
else: # pragma: no cover
raise RuntimeError(
"Unable to load {0} (required by {1})".format(
libname, symbol))
try:
thing = Quick(symbol)
except NameError: # pragma: no cover
# NameError: global name 'module' is not defined
# Python must be exiting...
return None
if isinstance(thing, root_module.PropertyProxy): # descriptor
setattr(self.__class__, symbol, thing)
return getattr(self, symbol)
# normal member
return thing
| {
"content_hash": "f1aa4703c0ccfb363d9bc2aad633032c",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 37.12359550561798,
"alnum_prop": 0.5920096852300242,
"repo_name": "ndawe/rootpy",
"id": "5b210137a0d390f377fbbc2d135f86544ad6b5ce",
"size": "3304",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rootpy/utils/quickroot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "109"
},
{
"name": "Makefile",
"bytes": "2778"
},
{
"name": "Python",
"bytes": "861240"
},
{
"name": "Shell",
"bytes": "3089"
}
],
"symlink_target": ""
} |
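A minimal usage sketch of the facade above: attribute access on the quickroot module loads the owning ROOT library on demand and resolves the symbol lazily. The import path mirrors the file's location in the metadata; a working ROOT + rootpy installation is assumed.

# Usage sketch; assumes ROOT and rootpy are installed and importable.
from rootpy.utils import quickroot as QROOT

# Each attribute access goes through QuickROOT.__getattr__: the owning
# library is Load()-ed if necessary, then the symbol is resolved without
# triggering PyROOT's expensive finalSetup().
hist_cls = QROOT.TH1        # loads libHist on first use (SYMBOLS maps TH1 -> Hist)
tree_cls = QROOT.TTree      # loads libTree on first use
canvas_cls = QROOT.TCanvas  # loads libGui, or falls back to batch mode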
import struct
import logging
from zorro.di import di, has_dependencies, dependency
from .keyregistry import KeyRegistry
from .mouseregistry import MouseRegistry
from .window import Window
from .xcb import Core, Rectangle, XError
from .groups import GroupManager
from .commands import CommandDispatcher
from .classify import Classifier
from .screen import ScreenManager
from .event import Event
from .config import Config
from . import randr
log = logging.getLogger(__name__)
@has_dependencies
class EventDispatcher(object):
keys = dependency(KeyRegistry, 'key-registry')
mouse = dependency(MouseRegistry, 'mouse-registry')
xcore = dependency(Core, 'xcore')
groupman = dependency(GroupManager, 'group-manager')
screenman = dependency(ScreenManager, 'screen-manager')
classifier = dependency(Classifier, 'classifier')
config = dependency(Config, 'config')
def __init__(self):
self.windows = {}
self.frames = {}
self.all_windows = {}
self.active_field = None
self.mapping_notify = Event('mapping_notify')
self.mapping_notify.listen(self._mapping_notify_delayed)
def dispatch(self, ev):
meth = getattr(self, 'handle_'+ev.__class__.__name__, None)
if meth:
meth(ev)
else:
log.warning("Unknown event ``%r''", ev)
def register_window(self, win):
self.all_windows[win.wid] = win
def handle_KeyPressEvent(self, ev):
if not self.keys.dispatch_event(ev):
if self.active_field:
self.active_field.handle_keypress(ev)
def handle_KeyReleaseEvent(self, ev):
pass # nothing to do at the moment
def handle_ButtonPressEvent(self, ev):
self.mouse.dispatch_button_press(ev)
def handle_ButtonReleaseEvent(self, ev):
self.mouse.dispatch_button_release(ev)
def handle_MotionNotifyEvent(self, ev):
self.mouse.dispatch_motion(ev)
def handle_MapRequestEvent(self, ev):
try:
win = self.windows[ev.window]
except KeyError:
log.warning("Configure request for non-existent window %r",
ev.window)
else:
win.want.visible = True
if win.frame is None:
frm = win.create_frame()
self.frames[frm.wid] = frm
self.all_windows[frm.wid] = frm
win.reparent_frame()
if not hasattr(win, 'group'):
self.classifier.apply(win)
self.groupman.add_window(win)
elif win.group.visible:
win.show()
def handle_EnterNotifyEvent(self, ev):
if self.mouse.drag:
return
try:
win = self.frames[ev.event]
except KeyError:
log.warning("Enter notify for non-existent window %r", ev.event)
else:
if ev.mode != self.xcore.NotifyMode.Grab:
if hasattr(win, 'pointer_enter'):
win.pointer_enter()
if self.active_field:
return
if(win.props.get("WM_HINTS") is None
or win.props.get('WM_HINTS')[0] & 1):
win.focus()
def handle_LeaveNotifyEvent(self, ev):
if self.mouse.drag:
return
try:
win = self.frames[ev.event]
except KeyError:
log.warning("Leave notify for non-existent window %r", ev.event)
else:
if ev.mode != self.xcore.NotifyMode.Grab:
if hasattr(win, 'pointer_leave'):
win.pointer_leave()
def handle_MapNotifyEvent(self, ev):
try:
win = self.all_windows[ev.window]
except KeyError:
log.warning("Map notify for non-existent window %r",
ev.window)
else:
if hasattr(win, 'group') and win.group.visible:
win.real.visible = True
if win.frame:
win.frame.show()
def handle_UnmapNotifyEvent(self, ev):
if ev.event not in self.frames:
return # do not need to track unmapping of unmanaged windows
try:
win = self.windows[ev.window]
except KeyError:
log.warning("Unmap notify for non-existent window %r",
ev.window)
else:
win.real.visible = False
win.done.visible = False
if win.frame:
win.ewmh.hiding_window(win)
win.frame.hide()
# According to the docs, windows should be reparented to the
# root window here, but in practice that doesn't work well
if hasattr(win, 'group'):
win.group.remove_window(win)
def handle_FocusInEvent(self, ev):
if(ev.event == self.xcore.root_window
and ev.mode not in (self.xcore.NotifyMode.Grab,
self.xcore.NotifyMode.Ungrab)
and ev.detail == getattr(self.xcore.NotifyDetail, 'None')):
self.xcore.raw.SetInputFocus(
focus=self.xcore.root_window,
revert_to=self.xcore.InputFocus.PointerRoot,
time=self.xcore.last_time,
)
return
try:
win = self.all_windows[ev.event]
except KeyError:
log.warning("Focus request for non-existent window %r",
ev.event)
else:
if(ev.mode not in (self.xcore.NotifyMode.Grab,
self.xcore.NotifyMode.Ungrab)
and ev.detail != self.xcore.NotifyDetail.Pointer):
win.focus_in()
def handle_FocusOutEvent(self, ev):
try:
win = self.all_windows[ev.event]
except KeyError:
log.warning("Focus request for non-existent window %r",
ev.event)
else:
if(ev.mode not in (self.xcore.NotifyMode.Grab,
self.xcore.NotifyMode.Ungrab)
and ev.detail != self.xcore.NotifyDetail.Pointer):
win.focus_out()
def handle_CreateNotifyEvent(self, ev):
win = di(self).inject(Window.from_notify(ev))
if win.wid in self.windows:
log.warning("Create notify for already existent window %r",
win.wid)
# TODO(tailhook) clean up old window
if win.wid in self.all_windows:
return
win.done.size = win.want.size
self.xcore.raw.ChangeWindowAttributes(window=win, params={
self.xcore.CW.EventMask: self.xcore.EventMask.PropertyChange
})
self.windows[win.wid] = win
self.all_windows[win.wid] = win
try:
for name in self.xcore.raw.ListProperties(window=win)['atoms']:
win.update_property(name)
except XError:
log.warning("Window destroyed immediately %d", win.wid)
def handle_ConfigureNotifyEvent(self, ev):
pass
def handle_ReparentNotifyEvent(self, ev):
pass
def handle_DestroyNotifyEvent(self, ev):
try:
win = self.all_windows.pop(ev.window)
except KeyError:
log.warning("Destroy notify for non-existent window %r",
ev.window)
else:
self.windows.pop(win.wid, None)
self.frames.pop(win.wid, None)
if hasattr(win, 'group'):
win.group.remove_window(win)
win.destroyed()
def handle_ConfigureRequestEvent(self, ev):
try:
win = self.windows[ev.window]
except KeyError:
log.warning("Configure request for non-existent window %r",
ev.window)
else:
win.update_size_request(ev)
def handle_PropertyNotifyEvent(self, ev):
try:
win = self.windows[ev.window]
except KeyError:
log.warning("Property notify event for non-existent window %r",
ev.window)
else:
win.update_property(ev.atom)
def handle_ExposeEvent(self, ev):
try:
win = self.all_windows[ev.window]
except KeyError:
log.warning("Expose event for non-existent window %r",
ev.window)
else:
win.expose(Rectangle(ev.x, ev.y, ev.width, ev.height))
def handle_ClientMessageEvent(self, ev):
type = self.xcore.atom[ev.type]
# import struct
# print("ClientMessage", ev, repr(type), struct.unpack('<5L', ev.data))
win = self.all_windows[ev.window]
if hasattr(win, 'client_message'):
win.client_message(ev)
else:
log.warning("Unhandled client message %r %r %r",
ev, type, struct.unpack('<5L', ev.data))
def handle_ScreenChangeNotifyEvent(self, ev):
# We only poll for events and use Xinerama for screen querying,
# because some drivers (nvidia) don't provide xrandr data
# correctly
if self.config['auto-screen-configuration']:
if randr.check_screens(self.xcore):
randr.configure_outputs(self.xcore,
self.config['screen-dpi']/25.4)
info = self.xcore.xinerama.QueryScreens()['screen_info']
self.screenman.update(list(
Rectangle(scr['x_org'], scr['y_org'],
scr['width'], scr['height']) for scr in info))
self.groupman.check_screens()
def handle_NotifyEvent(self, ev): # Xrandr events are reported here
log.warning("Notify event %r", ev)
def handle_MappingNotifyEvent(self, ev):
self.mapping_notify.emit()
def _mapping_notify_delayed(self):
self.keys.reconfigure_keys()
| {
"content_hash": "fc7e46c65297eca1e19990456ec40a1f",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 35.025,
"alnum_prop": 0.5683695319669624,
"repo_name": "tailhook/tilenol",
"id": "745e5015909aed2aad2f34dec4b98553b5346775",
"size": "9807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tilenol/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "228140"
},
{
"name": "Shell",
"bytes": "285"
}
],
"symlink_target": ""
} |
"""VCD Variable."""
from hdltools.vcd import VCDObject, VCDScope
class VCDVariable(VCDObject):
"""Variable declaration."""
def __init__(
self, *identifiers, var_type="wire", size=1, name=None, scope=None
):
"""Initialize."""
super().__init__()
self._vartype = var_type
self._size = size
self._identifiers = identifiers
self._name = name
self._scope = scope
self._aliases = []
self._value = None
self._last_changed = 0
@property
def var_type(self):
"""Get variable type."""
return self._vartype
@property
def size(self):
"""Get variable size."""
return self._size
def __len__(self):
"""Get variable size."""
return self.size
@property
def varid(self):
"""Get variable identifier."""
return self._identifiers
@property
def name(self):
"""Get variable name."""
return self._name
@property
def aliases(self):
"""Get aliases."""
return self._aliases
@property
def scope(self):
"""Get scope."""
return self._scope
# FIXME: "identifiers" does not make sense, why would it be a list?
@property
def identifiers(self):
"""Get identifiers."""
return self._identifiers
@property
def value(self):
"""Get last known value."""
return self._value
@value.setter
def value(self, value):
"""Set value."""
self._value = value
@property
def last_changed(self):
"""Get cycle when last changed."""
return self._last_changed
@last_changed.setter
def last_changed(self, time):
"""Record change."""
self._last_changed = time
def add_alias(self, scope, name):
"""Add an alias."""
self._aliases.append((scope, name))
def get_first_identifier(self):
"""Get identifier."""
if isinstance(self._identifiers, (tuple, list)):
return self._identifiers[0]
else:
return self._identifiers
@staticmethod
def from_tokens(vtype, width, id, name, **kwargs):
"""Build from parser tokens."""
scope = kwargs.get("scope", None)
return VCDVariable(
id, var_type=vtype, size=width, name=name, scope=scope
)
def __repr__(self):
"""Get representation."""
scope_str = str(self._scope) + "::" if self._scope else ""
return "{}{} ({})".format(scope_str, self._name, self._identifiers[0])
def dump_aliases(self):
"""Get representation for aliases."""
ret = []
for scope, name in self._aliases:
scope_str = str(scope) + "::" if scope else ""
ret.append(
"{}{} ({})".format(scope_str, name, self._identifiers[0])
)
return "\n".join(ret)
def pack(self):
"""Pack into binary representation."""
dump = {
"vartype": self._vartype,
"size": self._size,
"identifiers": self._identifiers,
"name": self._name,
"scope": self._scope.pack(),
"aliases": self._aliases,
}
return dump
@staticmethod
def unpack(src):
"""Unpack."""
identifiers = src["identifiers"]
scope, _ = VCDScope.from_str(src["scope"])
var = VCDVariable(
*identifiers,
var_type=src["vartype"],
size=src["size"],
name=src["name"],
scope=scope
)
for alias in src["aliases"]:
var.add_alias(*alias)
return var
| {
"content_hash": "5b87111627400c6164280f11855dfe41",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 25.53103448275862,
"alnum_prop": 0.5232306861156132,
"repo_name": "brunosmmm/hdltools",
"id": "3ff296aab446af726cac3f16d6cb1df95a1222fb",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdltools/vcd/variable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "470883"
},
{
"name": "Shell",
"bytes": "1354"
},
{
"name": "Verilog",
"bytes": "19781"
}
],
"symlink_target": ""
} |
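A small round-trip sketch for the pack()/unpack() pair above. The identifier, variable name, and the 'top::cpu' scope string are invented for illustration, and the example assumes VCDScope offers the pack()/from_str() round trip that VCDVariable.unpack() relies on.

# Illustrative pack()/unpack() round trip; names and scope are made up.
from hdltools.vcd import VCDScope
from hdltools.vcd.variable import VCDVariable

scope, _ = VCDScope.from_str("top::cpu")  # assumed '::'-separated scope syntax
var = VCDVariable("!", var_type="wire", size=8, name="data_bus", scope=scope)
var.add_alias(scope, "data_bus_alias")

dump = var.pack()                      # plain dict, easy to serialize elsewhere
restored = VCDVariable.unpack(dump)    # rebuilds the variable and its aliases
assert restored.name == var.name and restored.size == var.size
print(restored)                        # e.g. top::cpu::data_bus (!)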
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_webtest import get_scopefunc
def make_db(app):
session_options = {}
if app.testing:
session_options['scopefunc'] = get_scopefunc()
return SQLAlchemy(app, session_options=session_options)
app = Flask(__name__)
app.testing = True
db = make_db(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
greeting = db.Column(db.String(80), default=u'Hello, %s!')
def greet(self):
return self.greeting % self.name
@app.route('/user/<int:id>/')
def user(id):
return User.query.get_or_404(id).greet()
@app.route('/user/<int:id>/preview/', methods=['POST'])
def preview(id):
user = User.query.get_or_404(id)
user.greeting = request.form['greeting']
db.session.expunge(user)
return user.greet()
| {
"content_hash": "f91e0c4358d71ee92b012496baca1a96",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 62,
"avg_line_length": 24.054054054054053,
"alnum_prop": 0.6674157303370787,
"repo_name": "aromanovich/flask-webtest",
"id": "72f85e99ba84cb2de111d35bb48878618dc8fe7f",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core_sqlalchemy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "186"
},
{
"name": "Python",
"bytes": "16603"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
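The get_scopefunc() wiring above ties the SQLAlchemy session scope to flask-webtest's request scopes, which is what lets the preview view expunge its copy without leaking state into test code. Below is a hedged test sketch; the import path, database setup, and the TestApp(db=..., use_session_scopes=True) keywords are assumed from flask-webtest's documented SQLAlchemy integration.

# Test sketch (assumed import path and flask-webtest SQLAlchemy keywords).
from flask_webtest import TestApp
from core_sqlalchemy import app, db, User

def test_preview_does_not_persist_greeting():
    with app.app_context():
        db.create_all()
        db.session.add(User(id=1, name='Anton'))
        db.session.commit()

    w = TestApp(app, db=db, use_session_scopes=True)
    # The preview view renders the posted greeting but expunges the object,
    # so the stored greeting must remain the default afterwards.
    assert w.post('/user/1/preview/', {'greeting': 'Hi, %s!'}).text == 'Hi, Anton!'
    assert w.get('/user/1/').text == 'Hello, Anton!'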
from django.views import generic
from . import forms
from django.shortcuts import redirect
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib import messages
class SignInAndSignUp(generic.edit.FormMixin, generic.TemplateView):
signin_form_class = forms.LoginForm
signup_form_class = forms.SignupForm
def get(self, request, *args, **kwargs):
if "signin_form" not in kwargs:
kwargs["signin_form"] = self.signin_form_class()
if "signup_form" not in kwargs:
kwargs["signup_form"] = self.signup_form_class()
return super(SignInAndSignUp, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if 'sign_in' in request.POST:
form = self.signin_form_class(**self.get_form_kwargs())
if not form.is_valid():
messages.add_message(request,
messages.ERROR,
"Unable login! "
"Check username/password")
return super(SignInAndSignUp, self).get(request,
signup_form=self.signup_form_class(),
signin_form=form)
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
login(self.request, user)
else:
messages.add_message(request, messages.ERROR,
"Unable to find given username!")
if 'sign_up' in request.POST:
form = self.signup_form_class(**self.get_form_kwargs())
if not form.is_valid():
messages.add_message(request,
messages.ERROR,
"Unable to register! "
"Please retype the details")
return super(SignInAndSignUp, self).get(request,
signin_form=self.signin_form_class(),
signup_form=form)
form.save()
username = form.cleaned_data["username"]
password = form.cleaned_data["password1"]
messages.add_message(request,
messages.INFO,
"{0} added sucessfully".format(
username))
# Login automatically
user = authenticate(username=username, password=password)
login(self.request, user)
return redirect("home")
class LogoutView(generic.RedirectView):
url = reverse_lazy("home")
def get(self, request, *args, **kwargs):
logout(request)
messages.add_message(request, messages.INFO,
"Logout successful!")
return super(LogoutView, self).get(request, *args, **kwargs)
class ProductView(generic.TemplateView):
template_name = "product.html"
class ServiceView(generic.TemplateView):
template_name = "service.html"
class ContactView(generic.TemplateView):
template_name = "contact.html"
class AboutView(generic.TemplateView):
template_name = "about.html"
| {
"content_hash": "bf228e1f4aaa360a44e8a53120124958",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 40.92941176470588,
"alnum_prop": 0.5596435757401552,
"repo_name": "aarticianpc/marine-florits",
"id": "be5ab7de63d9b375ea034750b624efcf02cbbfbb",
"size": "3517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/accounts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16967"
},
{
"name": "CoffeeScript",
"bytes": "19515"
},
{
"name": "HTML",
"bytes": "47444"
},
{
"name": "JavaScript",
"bytes": "138458"
},
{
"name": "Python",
"bytes": "56604"
}
],
"symlink_target": ""
} |
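A hypothetical urls.py wiring for the views above, in the Django 1.x style implied by the django.core.urlresolvers import; the app module path and template name are assumptions, while the 'home' URL name matches the redirect()/reverse_lazy() calls in the module.

# Hypothetical URL configuration; module path and template name are assumed.
from django.conf.urls import url
from accounts import views

urlpatterns = [
    url(r'^$', views.SignInAndSignUp.as_view(template_name='home.html'),
        name='home'),
    url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
    url(r'^products/$', views.ProductView.as_view(), name='products'),
    url(r'^services/$', views.ServiceView.as_view(), name='services'),
    url(r'^contact/$', views.ContactView.as_view(), name='contact'),
    url(r'^about/$', views.AboutView.as_view(), name='about'),
]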
import cgi
from paste.urlparser import PkgResourcesParser
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from abraxas.lib.base import BaseController
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
request = self._py_object.request
resp = request.environ.get('pylons.original_response')
content = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
page = error_document_template % \
dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=cgi.escape(request.GET.get('code', str(resp.status_int))),
message=content)
return page
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file('/'.join(['media/img', id]))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file('/'.join(['media/style', id]))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
request = self._py_object.request
request.environ['PATH_INFO'] = '/%s' % path
return PkgResourcesParser('pylons', 'pylons')(request.environ, self.start_response)
| {
"content_hash": "d742f0b8f185cd0ffb409eb3be465827",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 37.97727272727273,
"alnum_prop": 0.6624775583482945,
"repo_name": "thakadu/Abraxas",
"id": "31d31c30c202677169cdd1fd5b1945e944e4bb2b",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abraxas/controllers/error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41287"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.platform import googletest
from tensorflow.python.ops import logging_ops
class TestUtilTest(test_util.TensorFlowTestCase):
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero"
in fe.exception.message)
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in fe.exception.message)
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [self.checkedThread(target=err_func, args=(i,))
for i in range(10)]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 8)
def testForceGPU(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Cannot assign a device to node"):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = [True]
y = [15]
logging_ops.Assert(x, y).run()
if __name__ == "__main__":
googletest.main()
| {
"content_hash": "a1ad535aa2b6bd8ecf385d0dd72708e3",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 80,
"avg_line_length": 31.69172932330827,
"alnum_prop": 0.672835112692764,
"repo_name": "MemeticParadigm/TensorFlow",
"id": "fc847975017d329d6a437c23b9790ff68670ddb1",
"size": "4215",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/test_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127104"
},
{
"name": "C++",
"bytes": "4901913"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637241"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2473570"
},
{
"name": "Shell",
"bytes": "1714"
},
{
"name": "TypeScript",
"bytes": "237446"
}
],
"symlink_target": ""
} |
from azure_common import BaseTest, arm_template
from c7n.utils import local_session
from c7n_azure.query import ChildTypeInfo
from c7n_azure.session import Session
from c7n_azure.utils import ResourceIdParser
class RecordSetTest(BaseTest):
def test_record_set_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'record-set-policy',
'resource': 'azure.recordset'
}, validate=True)
self.assertTrue(p)
@arm_template('dns.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-find-by-name',
'resource': 'azure.recordset',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'www'
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'www')
class DeleteRecordSetTest(BaseTest):
@classmethod
def setUpClass(cls, *args, **kwargs):
super(DeleteRecordSetTest, cls).setUpClass(*args, **kwargs)
cls.client = local_session(Session).client('azure.mgmt.dns.DnsManagementClient').record_sets
def tearDown(self, *args, **kwargs):
super(DeleteRecordSetTest, self).tearDown(*args, **kwargs)
rs = self.deleted_recordset
rs_id = rs['id']
rs_parent_id = rs[ChildTypeInfo.parent_key]
zone_name = ResourceIdParser.get_resource_name(rs_parent_id)
rs_name = ResourceIdParser.get_resource_name(rs_id)
rs_type = rs['type'].split('/')[-1]
rs_ttl = rs['properties']['TTL']
rs_arecord_ipaddr = rs['properties']['ARecords'][0]['ipv4Address']
DeleteRecordSetTest.client.create_or_update(
resource_group_name=rs['resourceGroup'],
zone_name=zone_name,
relative_record_set_name=rs_name,
record_type=rs_type,
parameters={
'ttl': rs_ttl,
'arecords': [
{
'ipv4_address': rs_arecord_ipaddr
}
]
},
)
@arm_template('dns.json')
def test_delete_a_record_set(self):
record_set_name = 'deleteme'
p = self.load_policy({
'name': 'test-delete-a-record-set',
'resource': 'azure.recordset',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': record_set_name
}
],
'actions': [
{
'type': 'delete'
}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], record_set_name)
rs = resources[0]
self.deleted_recordset = rs
rg = rs['resourceGroup']
zone = ResourceIdParser.get_resource_name(rs[ChildTypeInfo.parent_key])
self._assert_record_set_not_present(record_set_name, rg, zone)
def _assert_record_set_not_present(self, name, resource_group, dns_zone):
record_sets = DeleteRecordSetTest.client.list_by_dns_zone(resource_group, dns_zone)
record_set = next((rs for rs in record_sets if rs.name == name), None)
self.assertIsNone(record_set)
| {
"content_hash": "9f10088429d3830d0040dbd4ae155b24",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 100,
"avg_line_length": 32.357798165137616,
"alnum_prop": 0.5250921462999717,
"repo_name": "FireballDWF/cloud-custodian",
"id": "a26785aeefbe5189a14210f7f17a9bda5c7426b9",
"size": "4099",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests/test_record_set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "142024"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1440"
},
{
"name": "Python",
"bytes": "4893319"
},
{
"name": "Shell",
"bytes": "7227"
}
],
"symlink_target": ""
} |
from piped_statsd import version
from piped_statsd.reporter import MetricsReporter
| {
"content_hash": "39bb98cb438fb4bf23fe6d1633f07532",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 49,
"avg_line_length": 41.5,
"alnum_prop": 0.8674698795180723,
"repo_name": "alexbrasetvik/Piped",
"id": "47bf0affc6db0479e310ae2f295a3f66acf023db",
"size": "83",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "contrib/statsd/piped/plugins/statsd_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1144292"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
} |
import feedparser
import entry
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def compile_feeds(rsslist):
return reduce(lambda i, j: i + j,
map(lambda k: feedparser.parse(k)['entries'], rsslist), [])
def load_feed(rsslist):
for i in compile_feeds(rsslist):
if not entry.exists(i):
log.info("Created entry: " + str(i))
entry.create_entry(i)
| {
"content_hash": "c1e54f5ec69a0f3733ceb1f44b156c23",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 24.72222222222222,
"alnum_prop": 0.6314606741573033,
"repo_name": "ChattanoogaPublicLibrary/booksforcha",
"id": "06cbe92c915cd89d7ef7b50ed962bb4c2556eb30",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booksforcha/feed.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2517"
},
{
"name": "Python",
"bytes": "14880"
}
],
"symlink_target": ""
} |
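compile_feeds() above flattens the entry lists of every feed URL with a reduce/map chain, which relies on the Python 2 reduce builtin. The sketch below is an equivalent comprehension-based version (Python 3 friendly); it is illustrative only and keeps the same entry module contract.

# Equivalent, comprehension-based sketch (Python 3 friendly); illustrative only.
import feedparser
import entry

def compile_feeds(rsslist):
    # Flatten the 'entries' lists of every parsed feed into one list.
    return [item for url in rsslist for item in feedparser.parse(url)['entries']]

def load_feed(rsslist):
    for item in compile_feeds(rsslist):
        if not entry.exists(item):
            entry.create_entry(item)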
import time
import subprocess
import libqtile
import libqtile.layout
import libqtile.bar
import libqtile.command
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.hook
import libqtile.confreader
from nose.tools import assert_raises
from nose.plugins.attrib import attr
from . import utils
from .utils import Xephyr
class TestConfig:
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2),
libqtile.layout.max.Max()
]
floating_layout = libqtile.layout.floating.Floating(
float_rules=[dict(wmclass="xclock")])
keys = [
libqtile.config.Key(
["control"],
"k",
libqtile.command._Call([("layout", None)], "up")
),
libqtile.config.Key(
["control"],
"j",
libqtile.command._Call([("layout", None)], "down")
),
]
mouse = []
screens = [libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
],
20
),
)]
main = None
follow_mouse_focus = True
class BareConfig:
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2)
]
floating_layout = libqtile.layout.floating.Floating()
keys = [
libqtile.config.Key(
["control"],
"k",
libqtile.command._Call([("layout", None)], "up")
),
libqtile.config.Key(
["control"],
"j",
libqtile.command._Call([("layout", None)], "down")
),
]
mouse = []
screens = [libqtile.config.Screen()]
main = None
follow_mouse_focus = False
@Xephyr(True, TestConfig())
def test_screen_dim(self):
#self.c.restart()
self.testXclock()
assert self.c.screen.info()["index"] == 0
assert self.c.screen.info()["x"] == 0
assert self.c.screen.info()["width"] == 800
assert self.c.group.info()["name"] == 'a'
assert self.c.group.info()["focus"] == 'xclock'
self.c.to_screen(1)
self.testXeyes()
assert self.c.screen.info()["index"] == 1
assert self.c.screen.info()["x"] == 800
assert self.c.screen.info()["width"] == 640
assert self.c.group.info()["name"] == 'b'
assert self.c.group.info()["focus"] == 'xeyes'
self.c.to_screen(0)
assert self.c.screen.info()["index"] == 0
assert self.c.screen.info()["x"] == 0
assert self.c.screen.info()["width"] == 800
assert self.c.group.info()["name"] == 'a'
assert self.c.group.info()["focus"] == 'xclock'
@Xephyr(True, TestConfig(), xoffset=0)
def test_clone_dim(self):
self.testXclock()
assert self.c.screen.info()["index"] == 0
assert self.c.screen.info()["x"] == 0
assert self.c.screen.info()["width"] == 800
assert self.c.group.info()["name"] == 'a'
assert self.c.group.info()["focus"] == 'xclock'
assert len(self.c.screens()) == 1
@Xephyr(True, TestConfig())
def test_to_screen(self):
assert self.c.screen.info()["index"] == 0
self.c.to_screen(1)
assert self.c.screen.info()["index"] == 1
self.testWindow("one")
self.c.to_screen(0)
self.testWindow("two")
ga = self.c.groups()["a"]
assert ga["windows"] == ["two"]
gb = self.c.groups()["b"]
assert gb["windows"] == ["one"]
assert self.c.window.info()["name"] == "two"
self.c.next_screen()
assert self.c.window.info()["name"] == "one"
self.c.next_screen()
assert self.c.window.info()["name"] == "two"
self.c.prev_screen()
assert self.c.window.info()["name"] == "one"
@Xephyr(True, TestConfig())
def test_togroup(self):
self.testWindow("one")
assert_raises(libqtile.command.CommandError,
self.c.window.togroup, "nonexistent")
assert self.c.groups()["a"]["focus"] == "one"
self.c.window.togroup("a")
assert self.c.groups()["a"]["focus"] == "one"
self.c.window.togroup("b")
assert self.c.groups()["b"]["focus"] == "one"
assert self.c.groups()["a"]["focus"] == None
self.c.to_screen(1)
self.c.window.togroup("c")
assert self.c.groups()["c"]["focus"] == "one"
@Xephyr(True, TestConfig())
def test_resize(self):
self.c.screen[0].resize(x=10, y=10, w=100, h=100)
for _ in range(10):
time.sleep(0.1)
d = self.c.screen[0].info()
if d["width"] == d["height"] == 100:
break
else:
raise AssertionError("Screen didn't resize")
assert d["x"] == d["y"] == 10
@Xephyr(False, BareConfig())
def test_minimal(self):
assert self.c.status() == "OK"
@Xephyr(False, TestConfig())
def test_events(self):
assert self.c.status() == "OK"
# FIXME: failing test disabled. For some reason we don't seem
# to have a keymap in Xnest or Xephyr 99% of the time.
@Xephyr(False, TestConfig())
def test_keypress(self):
self.testWindow("one")
self.testWindow("two")
v = self.c.simulate_keypress(["unknown"], "j")
assert v.startswith("Unknown modifier")
assert self.c.groups()["a"]["focus"] == "two"
self.c.simulate_keypress(["control"], "j")
assert self.c.groups()["a"]["focus"] == "one"
@Xephyr(False, TestConfig())
def test_spawn(self):
# Spawn something with a pid greater than init's
assert int(self.c.spawn("true")) > 1
@Xephyr(False, TestConfig())
def test_kill(self):
self.testWindow("one")
self.testwindows = []
self.c.window[self.c.window.info()["id"]].kill()
self.c.sync()
for _ in range(20):
time.sleep(0.1)
if len(self.c.windows()) == 0:
break
else:
raise AssertionError("Window did not die...")
@Xephyr(False, TestConfig())
def test_regression_groupswitch(self):
self.c.group["c"].toscreen()
self.c.group["d"].toscreen()
assert self.c.groups()["c"]["screen"] == None
@Xephyr(False, TestConfig())
def test_next_layout(self):
self.testWindow("one")
self.testWindow("two")
assert len(self.c.layout.info()["stacks"]) == 1
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.c.next_layout()
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 1
@Xephyr(False, TestConfig())
def test_setlayout(self):
assert not self.c.layout.info()["name"] == "max"
self.c.group.setlayout("max")
assert self.c.layout.info()["name"] == "max"
@Xephyr(False, TestConfig())
def test_adddelgroup(self):
self.testWindow("one")
self.c.addgroup("dummygroup")
self.c.addgroup("testgroup")
assert "testgroup" in self.c.groups().keys()
self.c.window.togroup("testgroup")
self.c.delgroup("testgroup")
assert not "testgroup" in self.c.groups().keys()
# Assert that the test window is still a member of some group.
assert sum(len(i["windows"]) for i in self.c.groups().values())
for i in list(self.c.groups().keys())[:-1]:
self.c.delgroup(i)
assert_raises(libqtile.command.CommandException,
self.c.delgroup, list(self.c.groups().keys())[0])
@Xephyr(False, TestConfig())
def test_delgroup(self):
self.testWindow("one")
for i in ['a', 'd', 'c']:
self.c.delgroup(i)
assert_raises(libqtile.command.CommandException, self.c.delgroup, 'b')
@Xephyr(False, TestConfig())
def test_nextprevgroup(self):
start = self.c.group.info()["name"]
ret = self.c.screen.next_group()
assert self.c.group.info()["name"] != start
assert self.c.group.info()["name"] == ret
ret = self.c.screen.prev_group()
assert self.c.group.info()["name"] == start
@Xephyr(False, TestConfig())
def test_togglegroup(self):
self.c.group["a"].toscreen()
self.c.group["b"].toscreen()
self.c.screen.togglegroup("c")
assert self.c.group.info()["name"] == "c"
self.c.screen.togglegroup("c")
assert self.c.group.info()["name"] == "b"
self.c.screen.togglegroup()
assert self.c.group.info()["name"] == "c"
@Xephyr(False, TestConfig())
def test_inspect_xeyes(self):
self.testXeyes()
assert self.c.window.inspect()
@Xephyr(False, TestConfig())
def test_inspect_xterm(self):
self.testXterm()
assert self.c.window.inspect()["wm_class"]
@Xephyr(False, TestConfig())
def test_static(self):
self.testXeyes()
self.testWindow("one")
self.c.window[self.c.window.info()["id"]].static(0, 0, 0, 100, 100)
@Xephyr(False, TestConfig())
def test_match(self):
self.testXeyes()
assert self.c.window.match(wname="xeyes")
assert not self.c.window.match(wname="nonexistent")
@Xephyr(False, TestConfig())
def test_default_float(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXclock()
assert self.c.group.info()['focus'] == 'xclock'
assert self.c.window.info()['width'] == 164
assert self.c.window.info()['height'] == 164
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
assert self.c.window.info()['floating'] == True
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 164
assert self.c.window.info()['height'] == 164
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
assert self.c.window.info()['floating'] == True
@Xephyr(False, TestConfig())
def test_last_float_size(self):
"""
When you re-float something it would be preferable to have it
use the previous float size
"""
self.testXeyes()
assert self.c.window.info()['name'] == 'xeyes'
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 150
assert self.c.window.info()['height'] == 100
# resize
self.c.window.set_size_floating(50, 90, 42, 42)
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
# float again, should use last float size
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
# make sure it works through min and max
self.c.window.toggle_maximize()
self.c.window.toggle_minimize()
self.c.window.toggle_minimize()
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
@Xephyr(False, TestConfig())
def test_float_max_min_combo(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
assert self.c.window.info()['floating'] == False
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] == True
assert self.c.window.info()['maximized'] == True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] == True
assert self.c.window.info()['minimized'] == True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_floating()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] == False
assert self.c.window.info()['minimized'] == False
assert self.c.window.info()['maximized'] == False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@Xephyr(False, TestConfig())
def test_toggle_fullscreen(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'w': 150, 'h': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_fullscreen()
assert self.c.window.info()['floating'] == True
assert self.c.window.info()['maximized'] == False
assert self.c.window.info()['fullscreen'] == True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 600
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_fullscreen()
assert self.c.window.info()['floating'] == False
assert self.c.window.info()['maximized'] == False
assert self.c.window.info()['fullscreen'] == False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@Xephyr(False, TestConfig())
def test_toggle_max(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'w': 150, 'h': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] == True
assert self.c.window.info()['maximized'] == True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] == False
assert self.c.window.info()['maximized'] == False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@Xephyr(False, TestConfig())
def test_toggle_min(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'w': 150, 'h': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] == True
assert self.c.window.info()['minimized'] == True
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] == False
assert self.c.window.info()['minimized'] == False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@Xephyr(False, TestConfig())
def test_toggle_floating(self):
self.testXeyes()
assert self.c.window.info()['floating'] == False
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] == True
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] == False
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] == True
#change layout (should still be floating)
self.c.next_layout()
assert self.c.window.info()['floating'] == True
@Xephyr(False, TestConfig())
def test_floating_focus(self):
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
#self.testWindow("one")
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
self.c.window.toggle_floating()
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['name'] == 'xeyes'
assert self.c.group.info()['focus'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# change focus to xterm
self.c.group.next_window()
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['name'] != 'xeyes'
assert self.c.group.info()['focus'] != 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# focus back to xeyes
self.c.group.next_window()
assert self.c.window.info()['name'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# now focusing via layout is borked (won't go to float)
self.c.layout.up()
assert self.c.window.info()['name'] != 'xeyes'
self.c.layout.up()
assert self.c.window.info()['name'] != 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# focus back to xeyes
self.c.group.next_window()
assert self.c.window.info()['name'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
@Xephyr(False, TestConfig())
def test_move_floating(self):
self.testXeyes()
#self.testWindow("one")
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] == True
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 150
assert self.c.window.info()['height'] == 100
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.set_size_floating(50, 90, 42, 42)
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.resize_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 60
assert self.c.window.info()['height'] == 110
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.set_size_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 10
assert self.c.window.info()['height'] == 20
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
#change layout (x, y should be same)
self.c.next_layout()
assert self.c.window.info()['width'] == 10
assert self.c.window.info()['height'] == 20
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
@Xephyr(False, TestConfig(), randr=True)
def test_screens(self):
assert len(self.c.screens())
@Xephyr(False, TestConfig(), randr=True)
def test_rotate(self):
self.testWindow("one")
s = self.c.screens()[0]
height, width = s["height"], s["width"]
subprocess.call(
[
"xrandr",
"--output", "default",
"-display", self.display,
"--rotate", "left"
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
for _ in range(10):
time.sleep(0.1)
s = self.c.screens()[0]
if s["width"] == height and s["height"] == width:
break
else:
raise AssertionError("Screen did not rotate")
# TODO: see note on test_resize
@Xephyr(False, TestConfig(), randr=True)
def test_resize_(self):
self.testWindow("one")
subprocess.call(
[
"xrandr",
"-s", "480x640",
"-display", self.display
]
)
for _ in range(10):
time.sleep(0.1)
d = self.c.screen.info()
if d["width"] == 480 and d["height"] == 640:
break
else:
raise AssertionError("Screen did not resize")
@Xephyr(False, TestConfig())
def test_focus_stays_on_layout_switch(xephyr):
xephyr.testWindow("one")
xephyr.testWindow("two")
# switch to a double stack layout
xephyr.c.next_layout()
# focus on a different window than the default
xephyr.c.layout.next()
# toggle the layout
xephyr.c.next_layout()
xephyr.c.prev_layout()
assert xephyr.c.window.info()['name'] == 'one'
# Due to https://github.com/nose-devs/nose/issues/478, nose 1.1.2 ignores
# attributes on yielded functions. Workaround is to attach the attribute
# to the generator function. Can be removed once the issue is resolved.
@attr('xephyr')
def qtile_tests():
for config in (BareConfig, TestConfig):
for xinerama in (True, False):
@Xephyr(xinerama, config)
def test_xeyes(self):
self.testXeyes()
yield test_xeyes
@Xephyr(xinerama, config)
def test_xterm(self):
self.testXterm()
yield test_xterm
@Xephyr(xinerama, config)
def test_xterm_kill(self):
self.testXterm()
self.c.window.kill()
self.c.sync()
for _ in range(10):
time.sleep(0.1)
if not self.c.windows():
break
else:
raise AssertionError("xterm did not die")
yield test_xterm_kill
@Xephyr(xinerama, config)
def test_mapRequest(self):
self.testWindow("one")
info = self.c.groups()["a"]
assert "one" in info["windows"]
assert info["focus"] == "one"
self.testWindow("two")
info = self.c.groups()["a"]
assert "two" in info["windows"]
assert info["focus"] == "two"
yield test_mapRequest
@Xephyr(xinerama, config)
def test_unmap(self):
one = self.testWindow("one")
two = self.testWindow("two")
three = self.testWindow("three")
info = self.c.groups()["a"]
assert info["focus"] == "three"
assert len(self.c.windows()) == 3
self.kill(three)
assert len(self.c.windows()) == 2
info = self.c.groups()["a"]
assert info["focus"] == "two"
self.kill(two)
assert len(self.c.windows()) == 1
info = self.c.groups()["a"]
assert info["focus"] == "one"
self.kill(one)
assert len(self.c.windows()) == 0
info = self.c.groups()["a"]
assert info["focus"] == None
yield test_unmap
@Xephyr(xinerama, config)
def test_setgroup(self):
self.testWindow("one")
self.c.group["b"].toscreen()
self.groupconsistency()
if len(self.c.screens()) == 1:
assert self.c.groups()["a"]["screen"] == None
else:
assert self.c.groups()["a"]["screen"] == 1
assert self.c.groups()["b"]["screen"] == 0
self.c.group["c"].toscreen()
self.groupconsistency()
assert self.c.groups()["c"]["screen"] == 0
yield test_setgroup
@Xephyr(xinerama, config)
def test_unmap_noscreen(self):
self.testWindow("one")
pid = self.testWindow("two")
assert len(self.c.windows()) == 2
self.c.group["c"].toscreen()
self.groupconsistency()
self.c.status()
assert len(self.c.windows()) == 2
self.kill(pid)
assert len(self.c.windows()) == 1
assert self.c.groups()["a"]["focus"] == "one"
yield test_unmap_noscreen
def test_init():
assert_raises(
libqtile.manager.QtileError,
libqtile.config.Key,
[], "unknown", libqtile.command._Call("base", None, "foo")
)
assert_raises(
libqtile.manager.QtileError,
libqtile.config.Key,
["unknown"], "x", libqtile.command._Call("base", None, "foo")
)
class TScreen(libqtile.config.Screen):
def setGroup(self, x):
pass
def test_dx():
s = TScreen(left=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dx == 10
def test_dwidth():
s = TScreen(left=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dwidth == 90
s.right = libqtile.bar.Gap(10)
assert s.dwidth == 80
def test_dy():
s = TScreen(top=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dy == 10
def test_dheight():
s = TScreen(top=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dheight == 90
s.bottom = libqtile.bar.Gap(10)
assert s.dheight == 80
class _Config:
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2)
]
floating_layout = libqtile.layout.floating.Floating()
keys = [
libqtile.config.Key(
["control"],
"k",
libqtile.command._Call([("layout", None)], "up")
),
libqtile.config.Key(
["control"],
"j",
libqtile.command._Call([("layout", None)], "down")
),
]
mouse = []
screens = [libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
],
20
),
)]
auto_fullscreen = True
class ClientNewStaticConfig(_Config):
@staticmethod
def main(c):
def client_new(c):
c.static(0)
libqtile.hook.subscribe.client_new(client_new)
@Xephyr(False, ClientNewStaticConfig())
def test_minimal_(self):
a = self.testWindow("one")
self.kill(a)
if utils.whereis("gkrellm"):
@Xephyr(False, ClientNewStaticConfig())
def test_gkrellm(self):
self.testGkrellm()
time.sleep(0.1)
class ToGroupConfig(_Config):
@staticmethod
def main(c):
def client_new(c):
c.togroup("d")
libqtile.hook.subscribe.client_new(client_new)
@Xephyr(False, ToGroupConfig())
def test_minimal__(self):
self.c.group["d"].toscreen()
self.c.group["a"].toscreen()
a = self.testWindow("one")
assert len(self.c.group["d"].info()["windows"]) == 1
self.kill(a)
@Xephyr(False, TestConfig)
def test_colorPixel(self):
# test for #394
self.c.eval("self.colorPixel(\"ffffff\")")
| {
"content_hash": "9911e638434132d5b9b9b6a67ee64b1d",
"timestamp": "",
"source": "github",
"line_count": 921,
"max_line_length": 75,
"avg_line_length": 31.140065146579804,
"alnum_prop": 0.5773012552301255,
"repo_name": "nxnfufunezn/qtile",
"id": "1df7800bb05518009a7a1a1181c1d15d498a8ee2",
"size": "30013",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "test/test_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1180"
},
{
"name": "Python",
"bytes": "918526"
},
{
"name": "Shell",
"bytes": "2833"
}
],
"symlink_target": ""
} |
import numpy as np
class KNearestNeighbor:
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Input:
X - A num_train x dimension array where each row is a training point.
y - A vector of length num_train, where y[i] is the label for X[i, :]
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Input:
X - A num_test x dimension array where each row is a test point.
k - The number of nearest neighbors that vote for predicted label
num_loops - Determines which method to use to compute distances
between training points and test points.
Output:
y - A vector of length num_test, where y[i] is the predicted label for the
test point X[i, :].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Input:
X - An num_test x dimension array where each row is a test point.
Output:
dists - A num_test x num_train array where dists[i, j] is the distance
between the ith test point and the jth training point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j] #
#####################################################################
pass
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
pass
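# Editor's sketch (assumption: plain L2 distance, as in the two-loop
# version above); broadcasting X[i, :] against all training rows at once:
dists[i, :] = np.sqrt(np.sum((self.X_train - X[i, :]) ** 2, axis=1))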
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
pass
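# Editor's sketch (assumption: L2 distance). Uses the expansion
# ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 with one matrix product and two
# broadcast sums, as the hint above suggests; np.maximum guards against
# tiny negative values caused by floating-point round-off.
test_sq = np.sum(X ** 2, axis=1).reshape(-1, 1)        # num_test x 1
train_sq = np.sum(self.X_train ** 2, axis=1)           # (num_train,)
cross = X.dot(self.X_train.T)                          # num_test x num_train
dists = np.sqrt(np.maximum(test_sq - 2.0 * cross + train_sq, 0.0))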
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Input:
dists - A num_test x num_train array where dists[i, j] gives the distance
between the ith test point and the jth training point.
Output:
y - A vector of length num_test where y[i] is the predicted label for the
ith test point.
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in xrange(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith     #
# testing point, and use self.y_train to find the labels of these        #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
pass
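# Editor's sketch: take the indices of the k smallest distances for test
# point i and map them to their training labels via self.y_train (uses
# numpy.argsort, as the hint above suggests).
closest_y = self.y_train[np.argsort(dists[i, :])[:k]]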
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
pass
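# Editor's sketch: majority vote over closest_y. np.bincount counts label
# occurrences and np.argmax returns the first (i.e. smallest) label on a
# tie, which matches the tie-breaking rule described above. This assumes
# the labels are small non-negative integers, as in the CIFAR-10 setup.
y_pred[i] = np.argmax(np.bincount(np.asarray(closest_y, dtype=np.int64)))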
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
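# Editor's usage sketch (not in the original file): a tiny smoke test of the
# classifier on random data, assuming the sketch implementations added to the
# TODO blocks above. Written for the Python 2 environment this file targets;
# the shapes and numbers below are made up.
if __name__ == "__main__":
    np.random.seed(0)
    X_train = np.random.randn(50, 32)
    y_train = np.random.randint(0, 10, size=50)
    X_test = np.random.randn(5, 32)

    knn = KNearestNeighbor()
    knn.train(X_train, y_train)
    for loops in (0, 1, 2):
        # all three distance implementations should give the same labels
        print knn.predict(X_test, k=3, num_loops=loops)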
| {
"content_hash": "1180c60ba4a730cb447ca8e228f6c5d0",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 43.22360248447205,
"alnum_prop": 0.4323897111653973,
"repo_name": "DeercoderCourse/cs231n",
"id": "cc094f8fe033bea5b083583d11051e8e3727a0ab",
"size": "6959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "assignment1/cs231n/classifiers/k_nearest_neighbor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "148422"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
} |
""" Handler for the generated preferences stuff. """
import base64
import codecs
import logging
from StringIO import StringIO
from xml.parsers.expat import ExpatError
from datafinder.core.configuration.gen import preferences
from datafinder.core.error import ConfigurationError
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_DEFAULT_ENCODING = "UTF-8"
preferences.ExternalEncoding = _DEFAULT_ENCODING
class PreferencesHandler(object):
""" Handles the local preferences information. """
_streamWriterClass = codecs.getwriter(_DEFAULT_ENCODING)
_preferencesFileName = "preferences.xml"
_log = logging.getLogger()
def __init__(self, fileStorer):
"""
Constructor.
@param fileStorer: Points to the parent directory of the preferences file.
@type fileStorer: L{FileStorer<datafinder.persistence.factory.FileStorer>}
@note: Call C{load} to initialize the handler.
"""
self._fileStorer = fileStorer.getChild(self._preferencesFileName)
self._connections = None
self._connectionOrder = list()
self._preferences = None
def _reset(self):
""" Resets current configuration. """
self._connections = None
self._connectionOrder = list()
self._preferences = None
def load(self):
"""
Loads the preferences.
@note: When a problem occurs a new default preferences configuration is created.
"""
self._reset()
try:
if self._fileStorer.isLeaf:
data = self._fileStorer.readData()
content = data.read()
data.close()
try:
self._preferences = preferences.parseString(unicode(content, _DEFAULT_ENCODING))
except (ValueError, ExpatError, UnicodeDecodeError, SyntaxError):
self._log.error("Problem occurred during parsing preferences. Default preferences used.", exc_info=True)
self._preferences = self._getDefaultPreferences()
else:
self._preferences = self._getDefaultPreferences()
except PersistenceError:
self._preferences = self._getDefaultPreferences()
self._getConnections()
@staticmethod
def _getDefaultPreferences():
""" Creates the default preferences. """
return preferences.preferences(scriptUris=list(), searchQueries=list())
def store(self):
"""
Stores the preferences.
@raise ConfigurationError: Indicating problems on storage.
"""
try:
if not self._fileStorer.exists():
self._fileStorer.createResource()
stream = self._streamWriterClass(StringIO())
self._preferences.connections = list()
for connectionUri in self._connectionOrder:
connection = self._connections[connectionUri]
if connection.password is None:
encryptedPassword = None
else:
encryptedPassword = base64.encodestring(connection.password)
copiedConnection = preferences.connection(connection.url, connection.username, encryptedPassword,
connection.useLdap, connection.ldapServerUri, connection.ldapBaseDn,
connection.useLucene, connection.luceneIndexUri,
connection.defaultDataStore, connection.defaultArchiveStore,
connection.defaultOfflineStore)
if not copiedConnection.url is None:
self._preferences.addConnections(copiedConnection)
self._preferences.__dict__.update(self.__dict__)
try:
self._preferences.export(stream, 0)
except ExpatError:
raise ConfigurationError("Cannot persist preferences configuration.")
stream.seek(0)
self._fileStorer.writeData(stream)
except PersistenceError, error:
raise ConfigurationError("Unable to store preferences file.\nReason: '%s'" % error.message)
def getConnection(self, configurationUri):
"""
Returns the connection information for the given URI.
@param configurationUri: URI of the configuration.
@type configurationUri: C{unicode}
@return: Object containing the configuration parameters.
@rtype: C{object}
"""
result = None
if not configurationUri is None:
configurationUri = self._normalizeConfigurationUri(configurationUri)
if configurationUri in self._connections:
result = self._connections[configurationUri]
return result
def addScriptUri(self, scriptUri):
"""
Adds a script URI to the preferences.
@param scriptUri: URI identifying the script extension.
@type scriptUri: C{unicode}
"""
if not scriptUri in self._preferences.scriptUris:
self._preferences.scriptUris.append(scriptUri)
def removeScriptUri(self, scriptUri):
"""
Removes a script URI from the preferences.
@param scriptUri: URI identifying the script extension.
@type scriptUri: C{unicode}
"""
if scriptUri in self._preferences.scriptUris:
self._preferences.scriptUris.remove(scriptUri)
def clearScriptUris(self):
"""
Removes all existing script URIs from preferences.
"""
self._preferences.scriptUris = list()
def addSearchQuery(self, name, query):
"""
Adds a search query to the preferences.
@param name: Name of the search query.
@type name: C{unicode}
@param query: A search query string.
@type query: C{unicode}
"""
if not name is None and not query is None:
searchQuery = self._getSearchQuery(name)
if searchQuery is None:
searchQuery = preferences.searchQuery(name, query)
self._preferences.searchQueries.append(searchQuery)
else:
searchQuery.query = query
def _getSearchQuery(self, name):
""" Returns the query under the given name or C{None} if it does not exist. """
for searchQuery in self._preferences.searchQueries:
if searchQuery.name == name:
return searchQuery
return None
def removeSearchQuery(self, name):
"""
Removes a search query from the preferences.
@param name: Name of the search query.
@type name: C{unicode}
"""
searchQuery = self._getSearchQuery(name)
if not searchQuery is None:
self._preferences.searchQueries.remove(searchQuery)
def clearSearchQueries(self):
"""
Removes all existing search queries from preferences.
"""
self._preferences.searchQueries = list()
def addConnection(self, configurationUri, username=None, password=None,
useLdap=None, ldapServerUri=None, ldapBaseDn=None,
useLucene=None, luceneIndexUri=None,
defaultDataStore=None, defaultArchiveStore=None, defaultOfflineStore=None):
"""
Adds a connection.
@param configurationUri: URI of the configuration.
@type configurationUri: C{unicode}
@param username: Username for authentication.
@type username: C{unicode}
@param password: Not encrypted password.
@type password: C{unicode}
"""
if not configurationUri is None:
configurationUri = self._normalizeConfigurationUri(configurationUri)
if configurationUri in self._connectionOrder:
connection = self.getConnection(configurationUri)
self._connectionOrder.remove(configurationUri)
else:
connection = preferences.connection(configurationUri, username, password, useLdap,
ldapServerUri, ldapBaseDn, useLucene, luceneIndexUri,
defaultDataStore, defaultArchiveStore)
connection.username = username
connection.password = password
connection.useLdap = useLdap
connection.ldapServerUri = ldapServerUri
connection.ldapBaseDn = ldapBaseDn
connection.useLucene = useLucene
connection.luceneIndexUri = luceneIndexUri
connection.defaultDataStore = defaultDataStore
connection.defaultArchiveStore = defaultArchiveStore
connection.defaultOfflineStore = defaultOfflineStore
self._connections[configurationUri] = connection
self._connectionOrder.insert(0, configurationUri)
@staticmethod
def _normalizeConfigurationUri(configurationUri):
""" Ensures that the path component of the URI is in the correct format,
i.e. without trailing slash. """
if configurationUri.endswith("/"):
configurationUri = configurationUri[:-1]
return configurationUri
def removeConnection(self, configurationUri):
"""
Removes a connection.
@param configurationUri: URI of the configuration.
@type configurationUri: C{unicode}
"""
if not configurationUri is None:
configurationUri = self._normalizeConfigurationUri(configurationUri)
if configurationUri in self._connections:
del self._connections[configurationUri]
if configurationUri in self._connectionOrder:
self._connectionOrder.remove(configurationUri)
def clearConnections(self):
""" Clears all connections. """
self._connections.clear()
self._connectionOrder = list()
def _getConnections(self):
""" Getter for the connections. """
if self._connections is None or self._connectionOrder is None:
self._connections = dict()
for connection in self._preferences.connections:
if not connection.url is None:
self._connectionOrder.append(connection.url)
decryptedPassword = connection.password
if not decryptedPassword is None:
decryptedPassword = base64.decodestring(connection.password)
copiedConnection = preferences.connection(connection.url, connection.username, decryptedPassword,
connection.useLdap, connection.ldapServerUri, connection.ldapBaseDn,
connection.useLucene, connection.luceneIndexUri,
connection.defaultDataStore, connection.defaultArchiveStore,
connection.defaultOfflineStore)
self._connections[copiedConnection.url] = copiedConnection
return self._connectionOrder[:]
connectionUris = property(_getConnections)
def __getattr__(self, name):
""" Automatically redirects property calls to the generated class. """
return getattr(self._preferences, name)
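# Editor's usage sketch (not part of the original module). In real code the
# ``fileStorer`` comes from the datafinder persistence layer; the tiny
# in-memory stand-in below is hypothetical and only implements the handful of
# methods PreferencesHandler actually calls (getChild, isLeaf, exists,
# createResource, readData, writeData). All URIs and credentials are made up.
class _InMemoryFileStorerStub(object):

    def __init__(self):
        self._content = None

    def getChild(self, _name):
        return self

    @property
    def isLeaf(self):
        return self._content is not None

    def exists(self):
        return self._content is not None

    def createResource(self):
        self._content = ""

    def readData(self):
        return StringIO(self._content or "")

    def writeData(self, stream):
        self._content = stream.read()


def _preferencesUsageSketch():
    """ Illustrative only: load, modify, and persist a preferences file. """
    handler = PreferencesHandler(_InMemoryFileStorerStub())
    handler.load()
    handler.addConnection(u"http://example.org/repos/config",
                          username=u"demo", password=u"secret")
    handler.addScriptUri(u"http://example.org/scripts/example.py")
    handler.addSearchQuery(u"recent", u"modified > '2010-01-01'")
    handler.store()
    return handler.connectionUris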
| {
"content_hash": "d2fc202cb81a843fef6c6e8942777fc8",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 130,
"avg_line_length": 40.29934210526316,
"alnum_prop": 0.5737490817076157,
"repo_name": "DLR-SC/DataFinder",
"id": "07b1cc97ac59671bf113fb595f8c8764f50db5a5",
"size": "13945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/core/configuration/preferences.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |