Dataset schema:

| column | dtype | stats |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–232 |
| copies | string | 19 classes |
| size | string | lengths 4–7 |
| content | string | lengths 721–1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51–99.9 |
| line_max | int64 | 15–997 |
| alpha_frac | float64 | 0.25–0.97 |
| autogenerated | bool | 1 class |

Each row below is formatted as: repo_name | path | copies | size | content … | license | hash | line_mean | line_max | alpha_frac | autogenerated |
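The `line_mean`, `line_max`, `alpha_frac` and `autogenerated` columns read like the usual quality-filter metadata attached to scraped source files. A minimal sketch of filtering rows with this layout; the thresholds and the sample row are illustrative assumptions, not values taken from the dataset:

```python
# Illustrative only: the thresholds and the sample row are assumptions,
# not values taken from this dataset.
from typing import Dict, Iterable, Iterator


def keep_row(row: Dict) -> bool:
    """Typical heuristics for keeping a scraped source file."""
    return (
        not row["autogenerated"]
        and row["line_max"] <= 1000          # drop minified / generated blobs
        and row["line_mean"] <= 100
        and 0.25 <= row["alpha_frac"] <= 0.97  # mostly text, not binary junk
    )


def filter_rows(rows: Iterable[Dict]) -> Iterator[Dict]:
    for row in rows:
        if keep_row(row):
            yield row


if __name__ == "__main__":
    sample = {
        "repo_name": "example/repo",   # placeholder values
        "path": "pkg/module.py",
        "copies": "1",
        "size": "1242",
        "content": "print('hello')\n",
        "license": "apache-2.0",
        "hash": 123456789,
        "line_mean": 28.5,
        "line_max": 76,
        "alpha_frac": 0.69,
        "autogenerated": False,
    }
    print(sum(1 for _ in filter_rows([sample])))  # -> 1
```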
googleads/google-ads-python | google/ads/googleads/v6/enums/types/matching_function_context_type.py | 1 | 1242 |

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"MatchingFunctionContextTypeEnum",},
)
class MatchingFunctionContextTypeEnum(proto.Message):
r"""Container for context types for an operand in a matching
function.
"""
class MatchingFunctionContextType(proto.Enum):
r"""Possible context types for an operand in a matching function."""
UNSPECIFIED = 0
UNKNOWN = 1
FEED_ITEM_ID = 2
DEVICE_NAME = 3
FEED_ITEM_SET_ID = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,692,842,941,226,915,300 | 28.571429 | 76 | 0.699678 | false |
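The generated wrapper above is consumed like an ordinary Python enum. A standalone sketch of the same value set, using `enum.IntEnum` as a stand-in so it runs without the google-ads / proto-plus runtime (the descriptions in the mapping are illustrative):

```python
# Stand-in for the generated proto enum; the real class lives in the
# google-ads client library and requires the proto-plus runtime.
import enum


class MatchingFunctionContextType(enum.IntEnum):
    UNSPECIFIED = 0
    UNKNOWN = 1
    FEED_ITEM_ID = 2
    DEVICE_NAME = 3
    FEED_ITEM_SET_ID = 4


def describe(context_type: MatchingFunctionContextType) -> str:
    """Map an operand context type to a short, illustrative description."""
    return {
        MatchingFunctionContextType.FEED_ITEM_ID: "operand refers to a feed item",
        MatchingFunctionContextType.DEVICE_NAME: "operand refers to a device name",
        MatchingFunctionContextType.FEED_ITEM_SET_ID: "operand refers to a feed item set",
    }.get(context_type, "unspecified or unknown context")


print(describe(MatchingFunctionContextType(2)))  # operand refers to a feed item
```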
grouan/udata | udata/tests/site/test_site_api.py | 1 | 3123 |

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from flask import url_for
from udata.core.site.models import Site
from udata.core.site.metrics import SiteMetric
from udata.core.site.views import current_site
from udata.models import db, WithMetrics
from udata.tests.api import APITestCase
from udata.tests.factories import (
AdminFactory, VisibleDatasetFactory, VisibleReuseFactory, SiteFactory
)
class FakeModel(db.Document, WithMetrics):
name = db.StringField()
class FakeSiteMetric(SiteMetric):
name = 'fake-site-metric'
display_name = 'Fake site metric'
default = 0
def get_value(self):
return 2
class MetricsAPITest(APITestCase):
def test_get_metrics_for_site(self):
        '''It should fetch site metrics on GET'''
with self.app.app_context():
FakeSiteMetric.update()
response = self.get(url_for('api.metrics', id='site'))
self.assert200(response)
data = response.json[0]
self.assertEqual(data['level'], 'daily')
self.assertEqual(data['date'], date.today().isoformat())
self.assertIn('fake-site-metric', data['values'])
self.assertEqual(data['values']['fake-site-metric'], 2)
class SiteAPITest(APITestCase):
def test_get_site(self):
response = self.get(url_for('api.site'))
self.assert200(response)
def test_get_home_datasets(self):
site = SiteFactory.create(
id=self.app.config['SITE_ID'],
settings__home_datasets=VisibleDatasetFactory.create_batch(3)
)
current_site.reload()
self.login(AdminFactory())
response = self.get(url_for('api.home_datasets'))
self.assert200(response)
self.assertEqual(len(response.json), len(site.settings.home_datasets))
def test_get_home_reuses(self):
site = SiteFactory.create(
id=self.app.config['SITE_ID'],
settings__home_reuses=VisibleReuseFactory.create_batch(3)
)
current_site.reload()
self.login(AdminFactory())
response = self.get(url_for('api.home_reuses'))
self.assert200(response)
self.assertEqual(len(response.json), len(site.settings.home_reuses))
def test_set_home_datasets(self):
ids = [d.id for d in VisibleDatasetFactory.create_batch(3)]
self.login(AdminFactory())
response = self.put(url_for('api.home_datasets'), ids)
self.assert200(response)
self.assertEqual(len(response.json), len(ids))
site = Site.objects.get(id=self.app.config['SITE_ID'])
self.assertEqual([d.id for d in site.settings.home_datasets], ids)
def test_set_home_reuses(self):
ids = [r.id for r in VisibleReuseFactory.create_batch(3)]
self.login(AdminFactory())
response = self.put(url_for('api.home_reuses'), ids)
self.assert200(response)
self.assertEqual(len(response.json), len(ids))
site = Site.objects.get(id=self.app.config['SITE_ID'])
self.assertEqual([r.id for r in site.settings.home_reuses], ids)
| agpl-3.0 | 5,911,655,990,735,081,000 | 29.320388 | 78 | 0.651937 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/models/_models_py3.py | 1 | 39940 |

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_file_storage_enums import *
class AccessPolicy(msrest.serialization.Model):
"""An Access policy.
:param start: The date-time the policy is active.
:type start: str
:param expiry: The date-time the policy expires.
:type expiry: str
:param permission: The permissions for the ACL policy.
:type permission: str
"""
_attribute_map = {
'start': {'key': 'Start', 'type': 'str'},
'expiry': {'key': 'Expiry', 'type': 'str'},
'permission': {'key': 'Permission', 'type': 'str'},
}
def __init__(
self,
*,
start: Optional[str] = None,
expiry: Optional[str] = None,
permission: Optional[str] = None,
**kwargs
):
super(AccessPolicy, self).__init__(**kwargs)
self.start = start
self.expiry = expiry
self.permission = permission
class ClearRange(msrest.serialization.Model):
"""ClearRange.
All required parameters must be populated in order to send to Azure.
:param start: Required.
:type start: long
:param end: Required.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'ClearRange'
}
def __init__(
self,
*,
start: int,
end: int,
**kwargs
):
super(ClearRange, self).__init__(**kwargs)
self.start = start
self.end = end
class CopyFileSmbInfo(msrest.serialization.Model):
"""Parameter group.
:param file_permission_copy_mode: Specifies the option to copy file security descriptor from
source file or to set it using the value which is defined by the header value of x-ms-file-
permission or x-ms-file-permission-key. Possible values include: "source", "override".
:type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType
:param ignore_read_only: Specifies the option to overwrite the target file if it already exists
and has read-only attribute set.
:type ignore_read_only: bool
:param file_attributes: Specifies either the option to copy file attributes from a source
file(source) to a target file or a list of attributes to set on a target file.
:type file_attributes: str
:param file_creation_time: Specifies either the option to copy file creation time from a source
file(source) to a target file or a time value in ISO 8601 format to set as creation time on a
target file.
:type file_creation_time: str
:param file_last_write_time: Specifies either the option to copy file last write time from a
source file(source) to a target file or a time value in ISO 8601 format to set as last write
time on a target file.
:type file_last_write_time: str
:param set_archive_attribute: Specifies the option to set archive attribute on a target file.
True means archive attribute will be set on a target file despite attribute overrides or a
source file state.
:type set_archive_attribute: bool
"""
_attribute_map = {
'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'},
'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'},
'file_attributes': {'key': 'fileAttributes', 'type': 'str'},
'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'},
'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'},
'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'},
}
def __init__(
self,
*,
file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None,
ignore_read_only: Optional[bool] = None,
file_attributes: Optional[str] = None,
file_creation_time: Optional[str] = None,
file_last_write_time: Optional[str] = None,
set_archive_attribute: Optional[bool] = None,
**kwargs
):
super(CopyFileSmbInfo, self).__init__(**kwargs)
self.file_permission_copy_mode = file_permission_copy_mode
self.ignore_read_only = ignore_read_only
self.file_attributes = file_attributes
self.file_creation_time = file_creation_time
self.file_last_write_time = file_last_write_time
self.set_archive_attribute = set_archive_attribute
class CorsRule(msrest.serialization.Model):
"""CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to make a request
against the storage service via CORS. The origin domain is the domain from which the request
originates. Note that the origin must be an exact case-sensitive match with the origin that the
    user agent sends to the service. You can also use the wildcard character '*' to allow all origin
domains to make requests via CORS.
:type allowed_origins: str
:param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
use for a CORS request. (comma separated).
:type allowed_methods: str
:param allowed_headers: Required. The request headers that the origin domain may specify on the
CORS request.
:type allowed_headers: str
:param exposed_headers: Required. The response headers that may be sent in the response to the
CORS request and exposed by the browser to the request issuer.
:type exposed_headers: str
    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
preflight OPTIONS request.
:type max_age_in_seconds: int
"""
_validation = {
'allowed_origins': {'required': True},
'allowed_methods': {'required': True},
'allowed_headers': {'required': True},
'exposed_headers': {'required': True},
'max_age_in_seconds': {'required': True, 'minimum': 0},
}
_attribute_map = {
'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
allowed_origins: str,
allowed_methods: str,
allowed_headers: str,
exposed_headers: str,
max_age_in_seconds: int,
**kwargs
):
super(CorsRule, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.allowed_methods = allowed_methods
self.allowed_headers = allowed_headers
self.exposed_headers = exposed_headers
self.max_age_in_seconds = max_age_in_seconds
class DirectoryItem(msrest.serialization.Model):
"""A listed directory item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
}
_xml_map = {
'name': 'Directory'
}
def __init__(
self,
*,
name: str,
**kwargs
):
super(DirectoryItem, self).__init__(**kwargs)
self.name = name
class FileHTTPHeaders(msrest.serialization.Model):
"""Parameter group.
:param file_content_type: Sets the MIME content type of the file. The default type is
'application/octet-stream'.
:type file_content_type: str
:param file_content_encoding: Specifies which content encodings have been applied to the file.
:type file_content_encoding: str
:param file_content_language: Specifies the natural languages used by this resource.
:type file_content_language: str
:param file_cache_control: Sets the file's cache control. The File service stores this value
but does not use or modify it.
:type file_cache_control: str
:param file_content_md5: Sets the file's MD5 hash.
:type file_content_md5: bytearray
:param file_content_disposition: Sets the file's Content-Disposition header.
:type file_content_disposition: str
"""
_attribute_map = {
'file_content_type': {'key': 'fileContentType', 'type': 'str'},
'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'},
'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'},
'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'},
'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'},
'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'},
}
def __init__(
self,
*,
file_content_type: Optional[str] = None,
file_content_encoding: Optional[str] = None,
file_content_language: Optional[str] = None,
file_cache_control: Optional[str] = None,
file_content_md5: Optional[bytearray] = None,
file_content_disposition: Optional[str] = None,
**kwargs
):
super(FileHTTPHeaders, self).__init__(**kwargs)
self.file_content_type = file_content_type
self.file_content_encoding = file_content_encoding
self.file_content_language = file_content_language
self.file_cache_control = file_cache_control
self.file_content_md5 = file_content_md5
self.file_content_disposition = file_content_disposition
class FileItem(msrest.serialization.Model):
"""A listed file item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param properties: Required. File properties.
:type properties: ~azure.storage.fileshare.models.FileProperty
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'FileProperty'},
}
_xml_map = {
'name': 'File'
}
def __init__(
self,
*,
name: str,
properties: "FileProperty",
**kwargs
):
super(FileItem, self).__init__(**kwargs)
self.name = name
self.properties = properties
class FileProperty(msrest.serialization.Model):
"""File properties.
All required parameters must be populated in order to send to Azure.
:param content_length: Required. Content length of the file. This value may not be up-to-date
since an SMB client may have modified the file locally. The value of Content-Length may not
reflect that fact until the handle is closed or the op-lock is broken. To retrieve current
property values, call Get File Properties.
:type content_length: long
"""
_validation = {
'content_length': {'required': True},
}
_attribute_map = {
'content_length': {'key': 'Content-Length', 'type': 'long'},
}
def __init__(
self,
*,
content_length: int,
**kwargs
):
super(FileProperty, self).__init__(**kwargs)
self.content_length = content_length
class FileRange(msrest.serialization.Model):
"""An Azure Storage file range.
All required parameters must be populated in order to send to Azure.
:param start: Required. Start of the range.
:type start: long
:param end: Required. End of the range.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long'},
'end': {'key': 'End', 'type': 'long'},
}
_xml_map = {
'name': 'Range'
}
def __init__(
self,
*,
start: int,
end: int,
**kwargs
):
super(FileRange, self).__init__(**kwargs)
self.start = start
self.end = end
class FilesAndDirectoriesListSegment(msrest.serialization.Model):
"""Abstract for entries that can be listed from Directory.
All required parameters must be populated in order to send to Azure.
:param directory_items: Required.
:type directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
:param file_items: Required.
:type file_items: list[~azure.storage.fileshare.models.FileItem]
"""
_validation = {
'directory_items': {'required': True},
'file_items': {'required': True},
}
_attribute_map = {
'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'},
'file_items': {'key': 'FileItems', 'type': '[FileItem]'},
}
_xml_map = {
'name': 'Entries'
}
def __init__(
self,
*,
directory_items: List["DirectoryItem"],
file_items: List["FileItem"],
**kwargs
):
super(FilesAndDirectoriesListSegment, self).__init__(**kwargs)
self.directory_items = directory_items
self.file_items = file_items
class HandleItem(msrest.serialization.Model):
"""A listed Azure Storage handle item.
All required parameters must be populated in order to send to Azure.
:param handle_id: Required. XSMB service handle ID.
:type handle_id: str
:param path: Required. File or directory name including full path starting from share root.
:type path: str
:param file_id: Required. FileId uniquely identifies the file or directory.
:type file_id: str
:param parent_id: ParentId uniquely identifies the parent directory of the object.
:type parent_id: str
:param session_id: Required. SMB session ID in context of which the file handle was opened.
:type session_id: str
:param client_ip: Required. Client IP that opened the handle.
:type client_ip: str
:param open_time: Required. Time when the session that previously opened the handle has last
been reconnected. (UTC).
:type open_time: ~datetime.datetime
:param last_reconnect_time: Time handle was last connected to (UTC).
:type last_reconnect_time: ~datetime.datetime
"""
_validation = {
'handle_id': {'required': True},
'path': {'required': True},
'file_id': {'required': True},
'session_id': {'required': True},
'client_ip': {'required': True},
'open_time': {'required': True},
}
_attribute_map = {
'handle_id': {'key': 'HandleId', 'type': 'str'},
'path': {'key': 'Path', 'type': 'str'},
'file_id': {'key': 'FileId', 'type': 'str'},
'parent_id': {'key': 'ParentId', 'type': 'str'},
'session_id': {'key': 'SessionId', 'type': 'str'},
'client_ip': {'key': 'ClientIp', 'type': 'str'},
'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'},
'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'},
}
_xml_map = {
'name': 'Handle'
}
def __init__(
self,
*,
handle_id: str,
path: str,
file_id: str,
session_id: str,
client_ip: str,
open_time: datetime.datetime,
parent_id: Optional[str] = None,
last_reconnect_time: Optional[datetime.datetime] = None,
**kwargs
):
super(HandleItem, self).__init__(**kwargs)
self.handle_id = handle_id
self.path = path
self.file_id = file_id
self.parent_id = parent_id
self.session_id = session_id
self.client_ip = client_ip
self.open_time = open_time
self.last_reconnect_time = last_reconnect_time
class LeaseAccessConditions(msrest.serialization.Model):
"""Parameter group.
:param lease_id: If specified, the operation only succeeds if the resource's lease is active
and matches this ID.
:type lease_id: str
"""
_attribute_map = {
'lease_id': {'key': 'leaseId', 'type': 'str'},
}
def __init__(
self,
*,
lease_id: Optional[str] = None,
**kwargs
):
super(LeaseAccessConditions, self).__init__(**kwargs)
self.lease_id = lease_id
class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model):
"""An enumeration of directories and files.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param share_name: Required.
:type share_name: str
:param share_snapshot:
:type share_snapshot: str
:param directory_path: Required.
:type directory_path: str
:param prefix: Required.
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param segment: Required. Abstract for entries that can be listed from Directory.
:type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'share_name': {'required': True},
'directory_path': {'required': True},
'prefix': {'required': True},
'segment': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}},
'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}},
'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
service_endpoint: str,
share_name: str,
directory_path: str,
prefix: str,
segment: "FilesAndDirectoriesListSegment",
next_marker: str,
share_snapshot: Optional[str] = None,
marker: Optional[str] = None,
max_results: Optional[int] = None,
**kwargs
):
super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.share_name = share_name
self.share_snapshot = share_snapshot
self.directory_path = directory_path
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.segment = segment
self.next_marker = next_marker
class ListHandlesResponse(msrest.serialization.Model):
"""An enumeration of handles.
All required parameters must be populated in order to send to Azure.
:param handle_list:
:type handle_list: list[~azure.storage.fileshare.models.HandleItem]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'next_marker': {'required': True},
}
_attribute_map = {
'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
next_marker: str,
handle_list: Optional[List["HandleItem"]] = None,
**kwargs
):
super(ListHandlesResponse, self).__init__(**kwargs)
self.handle_list = handle_list
self.next_marker = next_marker
class ListSharesResponse(msrest.serialization.Model):
"""An enumeration of shares.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param share_items:
:type share_items: list[~azure.storage.fileshare.models.ShareItemInternal]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
service_endpoint: str,
next_marker: str,
prefix: Optional[str] = None,
marker: Optional[str] = None,
max_results: Optional[int] = None,
share_items: Optional[List["ShareItemInternal"]] = None,
**kwargs
):
super(ListSharesResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.share_items = share_items
self.next_marker = next_marker
class Metrics(msrest.serialization.Model):
"""Storage Analytics metrics for file service.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version of Storage Analytics to configure.
:type version: str
:param enabled: Required. Indicates whether metrics are enabled for the File service.
:type enabled: bool
:param include_apis: Indicates whether metrics should generate summary statistics for called
API operations.
:type include_apis: bool
:param retention_policy: The retention policy.
:type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
"""
_validation = {
'version': {'required': True},
'enabled': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str'},
'enabled': {'key': 'Enabled', 'type': 'bool'},
'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(
self,
*,
version: str,
enabled: bool,
include_apis: Optional[bool] = None,
retention_policy: Optional["RetentionPolicy"] = None,
**kwargs
):
super(Metrics, self).__init__(**kwargs)
self.version = version
self.enabled = enabled
self.include_apis = include_apis
self.retention_policy = retention_policy
class RetentionPolicy(msrest.serialization.Model):
"""The retention policy.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether a retention policy is enabled for the File service.
If false, metrics data is retained, and the user is responsible for deleting it.
:type enabled: bool
:param days: Indicates the number of days that metrics data should be retained. All data older
than this value will be deleted. Metrics data is deleted on a best-effort basis after the
retention period expires.
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'maximum': 365, 'minimum': 1},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool'},
'days': {'key': 'Days', 'type': 'int'},
}
def __init__(
self,
*,
enabled: bool,
days: Optional[int] = None,
**kwargs
):
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = enabled
self.days = days
class ShareFileRangeList(msrest.serialization.Model):
"""The list of file ranges.
:param ranges:
:type ranges: list[~azure.storage.fileshare.models.FileRange]
:param clear_ranges:
:type clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
"""
_attribute_map = {
'ranges': {'key': 'Ranges', 'type': '[FileRange]'},
'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'},
}
def __init__(
self,
*,
ranges: Optional[List["FileRange"]] = None,
clear_ranges: Optional[List["ClearRange"]] = None,
**kwargs
):
super(ShareFileRangeList, self).__init__(**kwargs)
self.ranges = ranges
self.clear_ranges = clear_ranges
class ShareItemInternal(msrest.serialization.Model):
"""A listed Azure Storage share item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param snapshot:
:type snapshot: str
:param deleted:
:type deleted: bool
:param version:
:type version: str
:param properties: Required. Properties of a share.
:type properties: ~azure.storage.fileshare.models.SharePropertiesInternal
:param metadata: Dictionary of :code:`<string>`.
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'snapshot': {'key': 'Snapshot', 'type': 'str'},
'deleted': {'key': 'Deleted', 'type': 'bool'},
'version': {'key': 'Version', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'},
'metadata': {'key': 'Metadata', 'type': '{str}'},
}
_xml_map = {
'name': 'Share'
}
def __init__(
self,
*,
name: str,
properties: "SharePropertiesInternal",
snapshot: Optional[str] = None,
deleted: Optional[bool] = None,
version: Optional[str] = None,
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(ShareItemInternal, self).__init__(**kwargs)
self.name = name
self.snapshot = snapshot
self.deleted = deleted
self.version = version
self.properties = properties
self.metadata = metadata
class SharePermission(msrest.serialization.Model):
"""A permission (a security descriptor) at the share level.
All required parameters must be populated in order to send to Azure.
:param permission: Required. The permission in the Security Descriptor Definition Language
(SDDL).
:type permission: str
"""
_validation = {
'permission': {'required': True},
}
_attribute_map = {
'permission': {'key': 'permission', 'type': 'str'},
}
def __init__(
self,
*,
permission: str,
**kwargs
):
super(SharePermission, self).__init__(**kwargs)
self.permission = permission
class SharePropertiesInternal(msrest.serialization.Model):
"""Properties of a share.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: ~datetime.datetime
:param etag: Required.
:type etag: str
:param quota: Required.
:type quota: int
:param provisioned_iops:
:type provisioned_iops: int
:param provisioned_ingress_m_bps:
:type provisioned_ingress_m_bps: int
:param provisioned_egress_m_bps:
:type provisioned_egress_m_bps: int
:param next_allowed_quota_downgrade_time:
:type next_allowed_quota_downgrade_time: ~datetime.datetime
:param deleted_time:
:type deleted_time: ~datetime.datetime
:param remaining_retention_days:
:type remaining_retention_days: int
:param access_tier:
:type access_tier: str
:param access_tier_change_time:
:type access_tier_change_time: ~datetime.datetime
:param access_tier_transition_state:
:type access_tier_transition_state: str
:param lease_status: The current lease status of the share. Possible values include: "locked",
"unlocked".
:type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType
:param lease_state: Lease state of the share. Possible values include: "available", "leased",
"expired", "breaking", "broken".
:type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType
:param lease_duration: When a share is leased, specifies whether the lease is of infinite or
fixed duration. Possible values include: "infinite", "fixed".
:type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType
:param enabled_protocols:
:type enabled_protocols: str
:param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash".
:type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
'quota': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
'etag': {'key': 'Etag', 'type': 'str'},
'quota': {'key': 'Quota', 'type': 'int'},
'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'},
'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'},
'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'},
'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
'access_tier': {'key': 'AccessTier', 'type': 'str'},
'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'},
'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'},
'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
'lease_state': {'key': 'LeaseState', 'type': 'str'},
'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'},
'root_squash': {'key': 'RootSquash', 'type': 'str'},
}
def __init__(
self,
*,
last_modified: datetime.datetime,
etag: str,
quota: int,
provisioned_iops: Optional[int] = None,
provisioned_ingress_m_bps: Optional[int] = None,
provisioned_egress_m_bps: Optional[int] = None,
next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None,
deleted_time: Optional[datetime.datetime] = None,
remaining_retention_days: Optional[int] = None,
access_tier: Optional[str] = None,
access_tier_change_time: Optional[datetime.datetime] = None,
access_tier_transition_state: Optional[str] = None,
lease_status: Optional[Union[str, "LeaseStatusType"]] = None,
lease_state: Optional[Union[str, "LeaseStateType"]] = None,
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None,
enabled_protocols: Optional[str] = None,
root_squash: Optional[Union[str, "ShareRootSquash"]] = None,
**kwargs
):
super(SharePropertiesInternal, self).__init__(**kwargs)
self.last_modified = last_modified
self.etag = etag
self.quota = quota
self.provisioned_iops = provisioned_iops
self.provisioned_ingress_m_bps = provisioned_ingress_m_bps
self.provisioned_egress_m_bps = provisioned_egress_m_bps
self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
self.deleted_time = deleted_time
self.remaining_retention_days = remaining_retention_days
self.access_tier = access_tier
self.access_tier_change_time = access_tier_change_time
self.access_tier_transition_state = access_tier_transition_state
self.lease_status = lease_status
self.lease_state = lease_state
self.lease_duration = lease_duration
self.enabled_protocols = enabled_protocols
self.root_squash = root_squash
class ShareProtocolSettings(msrest.serialization.Model):
"""Protocol settings.
:param smb: Settings for SMB protocol.
:type smb: ~azure.storage.fileshare.models.ShareSmbSettings
"""
_attribute_map = {
'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}},
}
def __init__(
self,
*,
smb: Optional["ShareSmbSettings"] = None,
**kwargs
):
super(ShareProtocolSettings, self).__init__(**kwargs)
self.smb = smb
class ShareSmbSettings(msrest.serialization.Model):
"""Settings for SMB protocol.
:param multichannel: Settings for SMB Multichannel.
:type multichannel: ~azure.storage.fileshare.models.SmbMultichannel
"""
_attribute_map = {
'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'},
}
def __init__(
self,
*,
multichannel: Optional["SmbMultichannel"] = None,
**kwargs
):
super(ShareSmbSettings, self).__init__(**kwargs)
self.multichannel = multichannel
class ShareStats(msrest.serialization.Model):
"""Stats for the share.
All required parameters must be populated in order to send to Azure.
:param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that
this value may not include all recently created or recently resized files.
:type share_usage_bytes: int
"""
_validation = {
'share_usage_bytes': {'required': True},
}
_attribute_map = {
'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'},
}
def __init__(
self,
*,
share_usage_bytes: int,
**kwargs
):
super(ShareStats, self).__init__(**kwargs)
self.share_usage_bytes = share_usage_bytes
class SignedIdentifier(msrest.serialization.Model):
"""Signed identifier.
All required parameters must be populated in order to send to Azure.
:param id: Required. A unique id.
:type id: str
:param access_policy: The access policy.
:type access_policy: ~azure.storage.fileshare.models.AccessPolicy
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'},
}
def __init__(
self,
*,
id: str,
access_policy: Optional["AccessPolicy"] = None,
**kwargs
):
super(SignedIdentifier, self).__init__(**kwargs)
self.id = id
self.access_policy = access_policy
class SmbMultichannel(msrest.serialization.Model):
"""Settings for SMB multichannel.
:param enabled: If SMB multichannel is enabled.
:type enabled: bool
"""
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool'},
}
_xml_map = {
'name': 'Multichannel'
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
super(SmbMultichannel, self).__init__(**kwargs)
self.enabled = enabled
class SourceModifiedAccessConditions(msrest.serialization.Model):
"""Parameter group.
:param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching
crc64 checksum.
:type source_if_match_crc64: bytearray
:param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a
matching crc64 checksum.
:type source_if_none_match_crc64: bytearray
"""
_attribute_map = {
'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'},
'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'},
}
def __init__(
self,
*,
source_if_match_crc64: Optional[bytearray] = None,
source_if_none_match_crc64: Optional[bytearray] = None,
**kwargs
):
super(SourceModifiedAccessConditions, self).__init__(**kwargs)
self.source_if_match_crc64 = source_if_match_crc64
self.source_if_none_match_crc64 = source_if_none_match_crc64
class StorageError(msrest.serialization.Model):
"""StorageError.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str'},
}
def __init__(
self,
*,
message: Optional[str] = None,
**kwargs
):
super(StorageError, self).__init__(**kwargs)
self.message = message
class StorageServiceProperties(msrest.serialization.Model):
"""Storage service properties.
:param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for
files.
:type hour_metrics: ~azure.storage.fileshare.models.Metrics
:param minute_metrics: A summary of request statistics grouped by API in minute aggregates for
files.
:type minute_metrics: ~azure.storage.fileshare.models.Metrics
:param cors: The set of CORS rules.
:type cors: list[~azure.storage.fileshare.models.CorsRule]
:param protocol: Protocol settings.
:type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
"""
_attribute_map = {
'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'},
'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'},
'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}},
'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
}
def __init__(
self,
*,
hour_metrics: Optional["Metrics"] = None,
minute_metrics: Optional["Metrics"] = None,
cors: Optional[List["CorsRule"]] = None,
protocol: Optional["ShareProtocolSettings"] = None,
**kwargs
):
super(StorageServiceProperties, self).__init__(**kwargs)
self.hour_metrics = hour_metrics
self.minute_metrics = minute_metrics
self.cors = cors
self.protocol = protocol
| mit | 229,342,826,065,661,280 | 32.647852 | 364 | 0.611017 | false |
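Every model in the generated file above pairs an `_attribute_map` (Python attribute → wire key and type) with optional `_validation` rules, and msrest drives serialization and validation from those tables. A much-simplified, self-contained illustration of that idea — not the real `msrest.serialization` implementation:

```python
# Simplified illustration of attribute-map-driven serialization; the real
# work is done by msrest.serialization.Serializer in the Azure SDK.
def serialize(obj, attribute_map, validation=None):
    validation = validation or {}
    payload = {}
    for attr, spec in attribute_map.items():
        value = getattr(obj, attr, None)
        rules = validation.get(attr, {})
        if rules.get("required") and value is None:
            raise ValueError("missing required field: %s" % attr)
        if value is not None:
            payload[spec["key"]] = value  # rename to the wire key
    return payload


class Range:  # stand-in shaped like e.g. FileRange above
    _validation = {"start": {"required": True}, "end": {"required": True}}
    _attribute_map = {
        "start": {"key": "Start", "type": "long"},
        "end": {"key": "End", "type": "long"},
    }

    def __init__(self, start, end):
        self.start = start
        self.end = end


r = Range(0, 511)
print(serialize(r, Range._attribute_map, Range._validation))
# {'Start': 0, 'End': 511}
```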
maikito26/script.foscam | resources/lib/utils.py | 1 | 6718 |

import os
import time
import glob
import xbmc
import xbmcaddon
import xbmcgui
import requests
__addon__ = xbmcaddon.Addon()
__id__ = __addon__.getAddonInfo('id')
__icon__ = __addon__.getAddonInfo('icon').decode("utf-8")
__version__ = __addon__.getAddonInfo('version')
addon_name = __addon__.getLocalizedString(32000)
TEXTURE_FMT = os.path.join(__addon__.getAddonInfo('path'), 'resources', 'media', '{0}.png')
ACTION_PREVIOUS_MENU = 10
ACTION_BACKSPACE = 110
ACTION_NAV_BACK = 92
ACTION_STOP = 13
ACTION_SELECT_ITEM = 7
INVALID_PASSWORD_CHARS = ('{', '}', ':', ';', '!', '?', '@', '\\', '/')
INVALID_USER_CHARS = ('@',)
def log(message, level=xbmc.LOGNOTICE):
xbmc.log("{0} v{1}: {2}".format(__id__, __version__, message), level=level)
def log_normal(message):
if int(__addon__.getSetting('debug')) > 0:
log(message)
def log_verbose(message):
if int(__addon__.getSetting('debug')) == 2:
log(message)
def log_error(message):
log(message, xbmc.LOGERROR)
def notify(msg, time=10000):
xbmcgui.Dialog().notification(addon_name, msg, __icon__, time)
def addon_info(info):
return __addon__.getAddonInfo(info)
def get_string(ident):
return __addon__.getLocalizedString(ident)
def get_setting(ident):
return __addon__.getSetting(ident)
def get_bool_setting(ident):
return get_setting(ident) == "true"
def get_int_setting(ident):
try:
return int(get_setting(ident))
except ValueError:
return None
def get_float_setting(ident):
return float(get_setting(ident))
def set_setting(ident, value):
__addon__.setSetting(ident, value)
def open_settings(callback=None):
if callback is not None:
callback()
__addon__.openSettings()
def invalid_char(credential, chars, stringid, show_dialog):
for char in chars:
if char in credential:
if show_dialog:
xbmcgui.Dialog().ok(get_string(32000), get_string(stringid),
" ", " ".join(chars))
return char
return False
def invalid_password_char(password, show_dialog=False):
return invalid_char(password, INVALID_PASSWORD_CHARS, 32105, show_dialog)
def invalid_user_char(user, show_dialog=False):
return invalid_char(user, INVALID_USER_CHARS, 32106, show_dialog)
def error_dialog(msg):
xbmcgui.Dialog().ok(get_string(32000), msg, " ", get_string(32102))
open_settings()
class SnapShot(object):
def __init__(self, path, interval, get_data):
self.time = time.time()
self.interval = interval
self.filename = os.path.join(path, "{0}.jpg".format(self.time))
self.get_data = get_data
def __enter__(self):
return self
def save(self):
with open(self.filename, 'wb') as output:
log_verbose("Snapshot {0}".format(self.filename))
data = self.get_data()
if data:
output.write(data)
return self.filename
else:
return ""
def __exit__(self, exc_type, exc_value, traceback):
current_time = time.time()
elapsed = current_time - self.time
log_verbose("Retrieving snapshot took {0:.2f} seconds".format(elapsed))
remaining = int(self.interval - elapsed*1000)
sleep = max(200, remaining)
log_verbose("Sleeping for {0} milliseconds".format(sleep))
xbmc.sleep(sleep)
try:
os.remove(self.filename)
except:
pass
else:
log_verbose("Deleted {0}".format(self.filename))
def get_mjpeg_frame(stream):
content_length = ""
try:
        while "Length" not in content_length:
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
bytes = int(content_length.split(':')[-1])
log_verbose("Stream JPEG Read Size: " + str(bytes))
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
return stream.read(bytes)
except requests.RequestException as e:
        log_error(str(e))  # this module is utils itself, so call log_error directly
return None
class ExtractMJPEGFrames(object):
def __init__(self, path, duration, stream, callback, *args):
self.path = path
self.duration = duration
self.stream = stream
self.callback = callback
self.callback_args = args
self._stop = False
def __enter__(self):
return self
def stop(self):
self._stop = True
def start(self):
start_time = time.time()
current_time = start_time
frames = 0
while current_time < start_time + self.duration and not self._stop:
xbmc.sleep(1)
frame = get_mjpeg_frame(self.stream)
if frame:
filename = os.path.join(self.path, "snapshot.{0}.jpg".format(time.time()))
with open(filename, 'wb') as jpeg_file:
jpeg_file.write(frame)
self.callback(filename, *self.callback_args)
log_verbose("Snapshot {0}".format(filename))
current_time = time.time()
frames += 1
duration = current_time - start_time
log_normal("Average fps: {0:.2f}".format(frames / duration))
return int(duration)
def __exit__(self, exc_type, exc_value, traceback):
self.stream.close()
for jpg in glob.glob(os.path.join(self.path, "snapshot.*.jpg")):
try:
os.remove(jpg)
except:
log_verbose("Unable to delete {0}".format(jpg))
else:
log_verbose("Deleted {0}".format(jpg))
class Monitor(xbmc.Monitor):
def __init__(self, updated_settings_callback):
xbmc.Monitor.__init__(self)
self.updated_settings_callback = updated_settings_callback
def onSettingsChanged(self):
self.updated_settings_callback()
class StopResumePlayer(xbmc.Player):
def maybe_stop_current(self):
if self.isPlaying():
self.resume_time = self.getTime()
self.previous_file = self.getPlayingFile()
self.stop()
log_normal("Stopped {0}".format(self.previous_file))
else:
self.previous_file = None
def maybe_resume_previous(self):
if self.previous_file is not None:
resume_time_str = "{0:.1f}".format(self.resume_time - 10.)
log_normal("Resuming {0} at {1}".format(self.previous_file, resume_time_str))
listitem = xbmcgui.ListItem()
listitem.setProperty('StartOffset', resume_time_str)
self.play(self.previous_file, listitem)
| gpl-3.0 | 8,232,375,941,994,584,000 | 28.991071 | 91 | 0.591694 | false |
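`get_mjpeg_frame` in the module above relies on MJPEG-over-HTTP framing: each multipart chunk announces the size of its JPEG payload in a `Content-Length` header, which is read and then consumed as a fixed number of bytes. A self-contained sketch of the same parsing against an in-memory stream — the sample bytes are made up for illustration:

```python
import io


def read_mjpeg_frame(stream):
    """Skip part headers until Content-Length, then read that many bytes."""
    line = b""
    while b"Length" not in line:
        line = stream.readline()
        if not line:
            raise EOFError("stream ended before a Content-Length header")
    nbytes = int(line.split(b":")[-1].decode())
    stream.readline()  # blank line separating headers from the payload
    return stream.read(nbytes)


# Made-up multipart chunk for illustration; a real camera stream would use
# an actual boundary string and full JPEG data.
chunk = io.BytesIO(
    b"--boundary\r\n"
    b"Content-Type: image/jpeg\r\n"
    b"Content-Length: 4\r\n"
    b"\r\n"
    b"\xff\xd8\xff\xd9"
)
print(read_mjpeg_frame(chunk))  # b'\xff\xd8\xff\xd9'
```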
sahiljain/catapult | dashboard/dashboard/pinpoint/models/quest/run_test_test.py | 1 | 7300 |

# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from dashboard.pinpoint.models.quest import run_test
_SWARMING_TASK_EXTRA_ARGS = [
'test_suite', '--story-filter', 'test',
'-v', '--upload-results',
'--output-format=chartjson', '--browser=release',
'--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
'--isolated-script-test-chartjson-output='
'${ISOLATED_OUTDIR}/chartjson-output.json',
]
class _RunTestTest(unittest.TestCase):
def assertNewTaskHasDimensions(self, swarming_tasks_new):
body = {
'name': 'Pinpoint job on Mac Pro 10.11 Perf',
'user': 'Pinpoint',
'priority': '100',
'expiration_secs': '600',
'properties': {
'inputs_ref': {'isolated': 'input isolated hash'},
'extra_args': _SWARMING_TASK_EXTRA_ARGS,
'dimensions': [
{'key': 'pool', 'value': 'Chrome-perf-pinpoint'},
{"key": "cores", "value": "8"},
{"key": "gpu", "value": "1002:6821"},
{"key": "os", "value": "Mac-10.11"},
],
'execution_timeout_secs': '3600',
'io_timeout_secs': '3600',
},
'tags': [
'configuration:Mac Pro 10.11 Perf',
],
}
swarming_tasks_new.assert_called_with(body)
def assertNewTaskHasBotId(self, swarming_tasks_new):
body = {
'name': 'Pinpoint job on Mac Pro 10.11 Perf',
'user': 'Pinpoint',
'priority': '100',
'expiration_secs': '600',
'properties': {
'inputs_ref': {'isolated': 'input isolated hash'},
'extra_args': _SWARMING_TASK_EXTRA_ARGS,
'dimensions': [
{'key': 'pool', 'value': 'Chrome-perf-pinpoint'},
{'key': 'id', 'value': 'bot id'},
],
'execution_timeout_secs': '3600',
'io_timeout_secs': '3600',
},
'tags': [
'configuration:Mac Pro 10.11 Perf',
],
}
swarming_tasks_new.assert_called_with(body)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class RunTestFullTest(_RunTestTest):
def testSuccess(self, swarming_task_result, swarming_tasks_new):
# Goes through a full run of two Executions.
# Call RunTest.Start() to create an Execution.
quest = run_test.RunTest('Mac Pro 10.11 Perf', 'test_suite', 'test')
execution = quest.Start('input isolated hash')
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_not_called()
# Call the first Poll() to start the swarming task.
swarming_tasks_new.return_value = {'task_id': 'task id'}
execution.Poll()
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_called_once()
self.assertNewTaskHasDimensions(swarming_tasks_new)
self.assertFalse(execution.completed)
self.assertFalse(execution.failed)
# Call subsequent Poll()s to check the task status.
swarming_task_result.return_value = {'state': 'PENDING'}
execution.Poll()
self.assertFalse(execution.completed)
self.assertFalse(execution.failed)
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 0,
'failure': False,
'outputs_ref': {'isolated': 'output isolated hash'},
'state': 'COMPLETED',
}
execution.Poll()
self.assertTrue(execution.completed)
self.assertFalse(execution.failed)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(execution.result_arguments,
{'isolated_hash': 'output isolated hash'})
# Start a second Execution to check bot_id handling. We get a bot_id from
# Swarming from the first Execution and reuse it in subsequent Executions.
execution = quest.Start('input isolated hash')
execution.Poll()
self.assertNewTaskHasBotId(swarming_tasks_new)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class SwarmingTaskStartTest(_RunTestTest):
def testUnknownConfig(self, swarming_task_result, swarming_tasks_new):
quest = run_test.RunTest('configuration', 'test_suite', 'test')
execution = quest.Start('input isolated hash')
execution.Poll()
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_not_called()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.UnknownConfigError)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class SwarmingTaskStatusTest(_RunTestTest):
def testSwarmingError(self, swarming_task_result, swarming_tasks_new):
swarming_task_result.return_value = {'state': 'BOT_DIED'}
swarming_tasks_new.return_value = {'task_id': 'task id'}
quest = run_test.RunTest('Mac Pro 10.11 Perf', 'test_suite', 'test')
execution = quest.Start('input isolated hash')
execution.Poll()
execution.Poll()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.SwarmingTaskError)
def testTestError(self, swarming_task_result, swarming_tasks_new):
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 1,
'failure': True,
'state': 'COMPLETED',
}
swarming_tasks_new.return_value = {'task_id': 'task id'}
quest = run_test.RunTest('Mac Pro 10.11 Perf', 'test_suite', 'test')
execution = quest.Start('isolated_hash')
execution.Poll()
execution.Poll()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.SwarmingTestError)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class BotIdHandlingTest(_RunTestTest):
def testExecutionFailure(self, swarming_task_result, swarming_tasks_new):
swarming_tasks_new.return_value = {'task_id': 'task id'}
swarming_task_result.return_value = {'state': 'EXPIRED'}
quest = run_test.RunTest('Mac Pro 10.11 Perf', 'test_suite', 'test')
execution = quest.Start('input isolated hash')
execution.Poll()
execution.Poll()
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 0,
'failure': False,
'outputs_ref': {'isolated': 'output isolated hash'},
'state': 'COMPLETED',
}
execution = quest.Start('input isolated hash')
execution.Poll()
execution.Poll()
self.assertNewTaskHasDimensions(swarming_tasks_new)
execution = quest.Start('input isolated hash')
execution.Poll()
self.assertNewTaskHasBotId(swarming_tasks_new)
| bsd-3-clause | 8,147,936,623,772,982,000 | 33.433962 | 78 | 0.643836 | false |
AndiDog/git-cola | cola/widgets/main.py | 1 | 40624 |

"""This view provides the main git-cola user interface.
"""
from __future__ import division, absolute_import, unicode_literals
import os
from functools import partial
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
from ..compat import uchr
from ..compat import WIN32
from ..i18n import N_
from ..interaction import Interaction
from ..models import prefs
from ..qtutils import get
from ..settings import Settings
from .. import cmds
from .. import core
from .. import guicmds
from .. import git
from .. import gitcmds
from .. import hotkeys
from .. import icons
from .. import qtutils
from .. import resources
from .. import utils
from .. import version
from . import about
from . import action
from . import archive
from . import bookmarks
from . import branch
from . import submodules
from . import browse
from . import cfgactions
from . import clone
from . import commitmsg
from . import compare
from . import createbranch
from . import createtag
from . import dag
from . import defs
from . import diff
from . import finder
from . import editremotes
from . import grep
from . import log
from . import merge
from . import patch
from . import prefs as prefs_widget
from . import recent
from . import remote
from . import search
from . import standard
from . import status
from . import stash
from . import toolbar
class MainView(standard.MainWindow):
config_actions_changed = Signal(object)
updated = Signal()
def __init__(self, context, parent=None, settings=None):
standard.MainWindow.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.context = context
self.git = context.git
self.dag = None
self.model = model = context.model
self.settings = settings
self.prefs_model = prefs_model = prefs.PreferencesModel(context)
self.toolbar_state = toolbar.ToolBarState(context, self)
# The widget version is used by import/export_state().
# Change this whenever dockwidgets are removed.
self.widget_version = 2
create_dock = qtutils.create_dock
cfg = context.cfg
self.browser_dockable = cfg.get('cola.browserdockable')
if self.browser_dockable:
browser = browse.worktree_browser(context, parent=self,
show=False, update=False)
self.browserdock = create_dock(N_('Browser'), self, widget=browser)
# "Actions" widget
self.actionsdock = create_dock(
N_('Actions'), self, widget=action.ActionButtons(context, self))
qtutils.hide_dock(self.actionsdock)
# "Repository Status" widget
self.statusdock = create_dock(
N_('Status'), self,
fn=lambda dock: status.StatusWidget(
context, dock.titleBarWidget(), dock))
self.statuswidget = self.statusdock.widget()
# "Switch Repository" widgets
self.bookmarksdock = create_dock(
N_('Favorites'), self,
fn=lambda dock: bookmarks.bookmark(context, dock))
bookmarkswidget = self.bookmarksdock.widget()
qtutils.hide_dock(self.bookmarksdock)
self.recentdock = create_dock(
N_('Recent'), self,
fn=lambda dock: bookmarks.recent(context, dock))
recentwidget = self.recentdock.widget()
qtutils.hide_dock(self.recentdock)
bookmarkswidget.connect_to(recentwidget)
# "Branch" widgets
self.branchdock = create_dock(
N_('Branches'), self, fn=partial(branch.BranchesWidget, context))
self.branchwidget = self.branchdock.widget()
titlebar = self.branchdock.titleBarWidget()
titlebar.add_corner_widget(self.branchwidget.filter_button)
titlebar.add_corner_widget(self.branchwidget.sort_order_button)
# "Submodule" widgets
self.submodulesdock = create_dock(
N_('Submodules'), self,
fn=partial(submodules.SubmodulesWidget, context))
self.submoduleswidget = self.submodulesdock.widget()
# "Commit Message Editor" widget
self.position_label = QtWidgets.QLabel()
self.position_label.setAlignment(Qt.AlignCenter)
font = qtutils.default_monospace_font()
font.setPointSize(int(font.pointSize() * 0.8))
self.position_label.setFont(font)
# make the position label fixed size to avoid layout issues
fm = self.position_label.fontMetrics()
width = fm.width('99:999') + defs.spacing
self.position_label.setMinimumWidth(width)
editor = commitmsg.CommitMessageEditor(context, self)
self.commiteditor = editor
self.commitdock = create_dock(N_('Commit'), self, widget=editor)
titlebar = self.commitdock.titleBarWidget()
titlebar.add_corner_widget(self.position_label)
# "Console" widget
self.logwidget = log.LogWidget(context)
self.logdock = create_dock(N_('Console'), self, widget=self.logwidget)
qtutils.hide_dock(self.logdock)
# "Diff Viewer" widget
self.diffdock = create_dock(
N_('Diff'), self,
fn=lambda dock: diff.Viewer(context, parent=dock))
self.diffviewer = self.diffdock.widget()
self.diffviewer.set_diff_type(self.model.diff_type)
self.diffeditor = self.diffviewer.text
titlebar = self.diffdock.titleBarWidget()
titlebar.add_corner_widget(self.diffviewer.options)
# All Actions
add_action = qtutils.add_action
add_action_bool = qtutils.add_action_bool
self.commit_amend_action = add_action_bool(
self, N_('Amend Last Commit'),
partial(cmds.do, cmds.AmendMode, context), False)
self.commit_amend_action.setShortcut(hotkeys.AMEND)
self.commit_amend_action.setShortcutContext(Qt.WidgetShortcut)
self.unstage_all_action = add_action(
self, N_('Unstage All'), cmds.run(cmds.UnstageAll, context))
self.unstage_all_action.setIcon(icons.remove())
self.unstage_selected_action = add_action(
self, N_('Unstage From Commit'),
cmds.run(cmds.UnstageSelected, context))
self.unstage_selected_action.setIcon(icons.remove())
self.show_diffstat_action = add_action(
self, N_('Diffstat'), self.statuswidget.select_header,
hotkeys.DIFFSTAT)
self.stage_modified_action = add_action(
self, N_('Stage Changed Files To Commit'),
cmds.run(cmds.StageModified, context), hotkeys.STAGE_MODIFIED)
self.stage_modified_action.setIcon(icons.add())
self.stage_untracked_action = add_action(
self, N_('Stage All Untracked'),
cmds.run(cmds.StageUntracked, context), hotkeys.STAGE_UNTRACKED)
self.stage_untracked_action.setIcon(icons.add())
self.apply_patches_action = add_action(
self, N_('Apply Patches...'),
partial(patch.apply_patches, context))
self.export_patches_action = add_action(
self, N_('Export Patches...'),
partial(guicmds.export_patches, context), hotkeys.EXPORT)
self.new_repository_action = add_action(
self, N_('New Repository...'),
partial(guicmds.open_new_repo, context))
self.new_repository_action.setIcon(icons.new())
self.new_bare_repository_action = add_action(
self, N_('New Bare Repository...'),
partial(guicmds.new_bare_repo, context))
self.new_bare_repository_action.setIcon(icons.new())
prefs_fn = partial(
prefs_widget.preferences, context, parent=self, model=prefs_model)
self.preferences_action = add_action(
self, N_('Preferences'), prefs_fn, QtGui.QKeySequence.Preferences)
self.edit_remotes_action = add_action(
self, N_('Edit Remotes...'), partial(editremotes.editor, context))
self.rescan_action = add_action(
self, cmds.Refresh.name(), cmds.run(cmds.Refresh, context),
*hotkeys.REFRESH_HOTKEYS)
self.rescan_action.setIcon(icons.sync())
self.find_files_action = add_action(
self, N_('Find Files'), partial(finder.finder, context),
hotkeys.FINDER, hotkeys.FINDER_SECONDARY)
self.find_files_action.setIcon(icons.zoom_in())
self.browse_recently_modified_action = add_action(
self, N_('Recently Modified Files...'),
partial(recent.browse_recent_files, context),
hotkeys.EDIT_SECONDARY)
self.cherry_pick_action = add_action(
self, N_('Cherry-Pick...'), partial(guicmds.cherry_pick, context),
hotkeys.CHERRY_PICK)
self.load_commitmsg_action = add_action(
self, N_('Load Commit Message...'),
partial(guicmds.load_commitmsg, context))
self.prepare_commitmsg_hook_action = add_action(
self, N_('Prepare Commit Message'),
cmds.run(cmds.PrepareCommitMessageHook, context),
hotkeys.PREPARE_COMMIT_MESSAGE)
self.save_tarball_action = add_action(
self, N_('Save As Tarball/Zip...'),
partial(archive.save_archive, context))
self.quit_action = add_action(
self, N_('Quit'), self.close, hotkeys.QUIT)
self.grep_action = add_action(
self, N_('Grep'), partial(grep.grep, context), hotkeys.GREP)
self.merge_local_action = add_action(
self, N_('Merge...'), partial(merge.local_merge, context),
hotkeys.MERGE)
self.merge_abort_action = add_action(
self, N_('Abort Merge...'), cmds.run(cmds.AbortMerge, context))
self.update_submodules_action = add_action(
self, N_('Update All Submodules...'),
cmds.run(cmds.SubmodulesUpdate, context))
self.fetch_action = add_action(
self, N_('Fetch...'), partial(remote.fetch, context),
hotkeys.FETCH)
self.push_action = add_action(
self, N_('Push...'), partial(remote.push, context), hotkeys.PUSH)
self.pull_action = add_action(
self, N_('Pull...'), partial(remote.pull, context), hotkeys.PULL)
self.open_repo_action = add_action(
self, N_('Open...'),
partial(guicmds.open_repo, context), hotkeys.OPEN)
self.open_repo_action.setIcon(icons.folder())
self.open_repo_new_action = add_action(
self, N_('Open in New Window...'),
partial(guicmds.open_repo_in_new_window, context))
self.open_repo_new_action.setIcon(icons.folder())
self.stash_action = add_action(
self, N_('Stash...'), partial(stash.view, context), hotkeys.STASH)
self.reset_branch_head_action = add_action(
self, N_('Reset Branch Head'),
partial(guicmds.reset_branch_head, context))
self.reset_worktree_action = add_action(
self, N_('Reset Worktree'),
partial(guicmds.reset_worktree, context))
self.clone_repo_action = add_action(
self, N_('Clone...'),
partial(clone.clone, context, settings=settings))
self.clone_repo_action.setIcon(icons.repo())
self.help_docs_action = add_action(
self, N_('Documentation'), resources.show_html_docs,
QtGui.QKeySequence.HelpContents)
self.help_shortcuts_action = add_action(
self, N_('Keyboard Shortcuts'), about.show_shortcuts,
hotkeys.QUESTION)
self.visualize_current_action = add_action(
self, N_('Visualize Current Branch...'),
cmds.run(cmds.VisualizeCurrent, context))
self.visualize_all_action = add_action(
self, N_('Visualize All Branches...'),
cmds.run(cmds.VisualizeAll, context))
self.search_commits_action = add_action(
self, N_('Search...'), partial(search.search, context))
self.browse_branch_action = add_action(
self, N_('Browse Current Branch...'),
partial(guicmds.browse_current, context))
self.browse_other_branch_action = add_action(
self, N_('Browse Other Branch...'),
partial(guicmds.browse_other, context))
self.load_commitmsg_template_action = add_action(
self, N_('Get Commit Message Template'),
cmds.run(cmds.LoadCommitMessageFromTemplate, context))
self.help_about_action = add_action(
self, N_('About'), partial(about.about_dialog, context))
self.diff_expression_action = add_action(
self, N_('Expression...'),
partial(guicmds.diff_expression, context))
self.branch_compare_action = add_action(
self, N_('Branches...'),
partial(compare.compare_branches, context))
self.create_tag_action = add_action(
self, N_('Create Tag...'),
partial(createtag.create_tag, context, settings=settings))
self.create_branch_action = add_action(
self, N_('Create...'),
partial(createbranch.create_new_branch, context,
settings=settings),
hotkeys.BRANCH)
self.create_branch_action.setIcon(icons.branch())
self.delete_branch_action = add_action(
self, N_('Delete...'),
partial(guicmds.delete_branch, context))
self.delete_remote_branch_action = add_action(
self, N_('Delete Remote Branch...'),
partial(guicmds.delete_remote_branch, context))
self.rename_branch_action = add_action(
self, N_('Rename Branch...'),
partial(guicmds.rename_branch, context))
self.checkout_branch_action = add_action(
self, N_('Checkout...'),
partial(guicmds.checkout_branch, context),
hotkeys.CHECKOUT)
self.branch_review_action = add_action(
self, N_('Review...'),
partial(guicmds.review_branch, context))
self.browse_action = add_action(
self, N_('File Browser...'),
partial(browse.worktree_browser, context))
self.browse_action.setIcon(icons.cola())
self.dag_action = add_action(self, N_('DAG...'), self.git_dag)
self.dag_action.setIcon(icons.cola())
self.rebase_start_action = add_action(
self, N_('Start Interactive Rebase...'),
cmds.run(cmds.Rebase, context), hotkeys.REBASE_START_AND_CONTINUE)
self.rebase_edit_todo_action = add_action(
self, N_('Edit...'), cmds.run(cmds.RebaseEditTodo, context))
self.rebase_continue_action = add_action(
self, N_('Continue'), cmds.run(cmds.RebaseContinue, context),
hotkeys.REBASE_START_AND_CONTINUE)
self.rebase_skip_action = add_action(
self, N_('Skip Current Patch'), cmds.run(cmds.RebaseSkip, context))
self.rebase_abort_action = add_action(
self, N_('Abort'), cmds.run(cmds.RebaseAbort, context))
# For "Start Rebase" only, reverse the first argument to setEnabled()
# so that we can operate on it as a group.
# We can do this because can_rebase == not is_rebasing
self.rebase_start_action_proxy = utils.Proxy(
self.rebase_start_action,
setEnabled=lambda x: self.rebase_start_action.setEnabled(not x))
self.rebase_group = utils.Group(self.rebase_start_action_proxy,
self.rebase_edit_todo_action,
self.rebase_continue_action,
self.rebase_skip_action,
self.rebase_abort_action)
self.annex_init_action = qtutils.add_action(
self, N_('Initialize Git Annex'),
cmds.run(cmds.AnnexInit, context))
self.lfs_init_action = qtutils.add_action(
self, N_('Initialize Git LFS'), cmds.run(cmds.LFSInstall, context))
self.lock_layout_action = add_action_bool(
self, N_('Lock Layout'), self.set_lock_layout, False)
self.reset_layout_action = add_action(
self, N_('Reset Layout'), self.reset_layout)
# Create the application menu
self.menubar = QtWidgets.QMenuBar(self)
self.setMenuBar(self.menubar)
# File Menu
add_menu = qtutils.add_menu
self.file_menu = add_menu(N_('&File'), self.menubar)
# File->Open Recent menu
self.open_recent_menu = self.file_menu.addMenu(N_('Open Recent'))
self.open_recent_menu.setIcon(icons.folder())
self.file_menu.addAction(self.open_repo_action)
self.file_menu.addAction(self.open_repo_new_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.new_repository_action)
self.file_menu.addAction(self.new_bare_repository_action)
self.file_menu.addAction(self.clone_repo_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.rescan_action)
self.file_menu.addAction(self.find_files_action)
self.file_menu.addAction(self.edit_remotes_action)
self.file_menu.addAction(self.browse_recently_modified_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.apply_patches_action)
self.file_menu.addAction(self.export_patches_action)
self.file_menu.addAction(self.save_tarball_action)
# Git Annex / Git LFS
annex = core.find_executable('git-annex')
lfs = core.find_executable('git-lfs')
if annex or lfs:
self.file_menu.addSeparator()
if annex:
self.file_menu.addAction(self.annex_init_action)
if lfs:
self.file_menu.addAction(self.lfs_init_action)
self.file_menu.addSeparator()
self.file_menu.addAction(self.preferences_action)
self.file_menu.addAction(self.quit_action)
# Edit Menu
self.edit_proxy = edit_proxy = (
FocusProxy(editor, editor.summary, editor.description))
copy_widgets = (
self, editor.summary, editor.description, self.diffeditor,
bookmarkswidget.tree, recentwidget.tree,
)
edit_proxy.override('copy', copy_widgets)
edit_proxy.override('selectAll', copy_widgets)
edit_menu = self.edit_menu = add_menu(N_('&Edit'), self.menubar)
add_action(edit_menu, N_('Undo'), edit_proxy.undo, hotkeys.UNDO)
add_action(edit_menu, N_('Redo'), edit_proxy.redo, hotkeys.REDO)
edit_menu.addSeparator()
add_action(edit_menu, N_('Cut'), edit_proxy.cut, hotkeys.CUT)
add_action(edit_menu, N_('Copy'), edit_proxy.copy, hotkeys.COPY)
add_action(edit_menu, N_('Paste'), edit_proxy.paste, hotkeys.PASTE)
add_action(edit_menu, N_('Delete'), edit_proxy.delete, hotkeys.DELETE)
edit_menu.addSeparator()
add_action(edit_menu, N_('Select All'), edit_proxy.selectAll,
hotkeys.SELECT_ALL)
edit_menu.addSeparator()
commitmsg.add_menu_actions(edit_menu, self.commiteditor.menu_actions)
# Actions menu
self.actions_menu = add_menu(N_('Actions'), self.menubar)
self.actions_menu.addAction(self.fetch_action)
self.actions_menu.addAction(self.push_action)
self.actions_menu.addAction(self.pull_action)
self.actions_menu.addAction(self.stash_action)
self.actions_menu.addSeparator()
self.actions_menu.addAction(self.create_tag_action)
self.actions_menu.addAction(self.cherry_pick_action)
self.actions_menu.addAction(self.merge_local_action)
self.actions_menu.addAction(self.merge_abort_action)
self.actions_menu.addSeparator()
self.actions_menu.addAction(self.update_submodules_action)
self.actions_menu.addSeparator()
self.actions_reset_menu = self.actions_menu.addMenu(N_('Reset'))
self.actions_reset_menu.addAction(self.reset_branch_head_action)
self.actions_reset_menu.addAction(self.reset_worktree_action)
self.actions_menu.addSeparator()
self.actions_menu.addAction(self.grep_action)
self.actions_menu.addAction(self.search_commits_action)
# Commit Menu
self.commit_menu = add_menu(N_('Commit@@verb'), self.menubar)
self.commit_menu.setTitle(N_('Commit@@verb'))
self.commit_menu.addAction(self.commiteditor.commit_action)
self.commit_menu.addAction(self.commit_amend_action)
self.commit_menu.addSeparator()
self.commit_menu.addAction(self.stage_modified_action)
self.commit_menu.addAction(self.stage_untracked_action)
self.commit_menu.addSeparator()
self.commit_menu.addAction(self.unstage_all_action)
self.commit_menu.addAction(self.unstage_selected_action)
self.commit_menu.addSeparator()
self.commit_menu.addAction(self.load_commitmsg_action)
self.commit_menu.addAction(self.load_commitmsg_template_action)
self.commit_menu.addAction(self.prepare_commitmsg_hook_action)
# Diff Menu
self.diff_menu = add_menu(N_('Diff'), self.menubar)
self.diff_menu.addAction(self.diff_expression_action)
self.diff_menu.addAction(self.branch_compare_action)
self.diff_menu.addSeparator()
self.diff_menu.addAction(self.show_diffstat_action)
# Branch Menu
self.branch_menu = add_menu(N_('Branch'), self.menubar)
self.branch_menu.addAction(self.branch_review_action)
self.branch_menu.addSeparator()
self.branch_menu.addAction(self.create_branch_action)
self.branch_menu.addAction(self.checkout_branch_action)
self.branch_menu.addAction(self.delete_branch_action)
self.branch_menu.addAction(self.delete_remote_branch_action)
self.branch_menu.addAction(self.rename_branch_action)
self.branch_menu.addSeparator()
self.branch_menu.addAction(self.browse_branch_action)
self.branch_menu.addAction(self.browse_other_branch_action)
self.branch_menu.addSeparator()
self.branch_menu.addAction(self.visualize_current_action)
self.branch_menu.addAction(self.visualize_all_action)
# Rebase menu
self.rebase_menu = add_menu(N_('Rebase'), self.actions_menu)
self.rebase_menu.addAction(self.rebase_start_action)
self.rebase_menu.addAction(self.rebase_edit_todo_action)
self.rebase_menu.addSeparator()
self.rebase_menu.addAction(self.rebase_continue_action)
self.rebase_menu.addAction(self.rebase_skip_action)
self.rebase_menu.addSeparator()
self.rebase_menu.addAction(self.rebase_abort_action)
# View Menu
self.view_menu = add_menu(N_('View'), self.menubar)
# pylint: disable=no-member
self.view_menu.aboutToShow.connect(
lambda: self.build_view_menu(self.view_menu))
self.setup_dockwidget_view_menu()
if utils.is_darwin():
# TODO or self.menubar.setNativeMenuBar(False)
# Since native OSX menu doesn't show empty entries
self.build_view_menu(self.view_menu)
# Help Menu
self.help_menu = add_menu(N_('Help'), self.menubar)
self.help_menu.addAction(self.help_docs_action)
self.help_menu.addAction(self.help_shortcuts_action)
self.help_menu.addAction(self.help_about_action)
# Arrange dock widgets
bottom = Qt.BottomDockWidgetArea
top = Qt.TopDockWidgetArea
self.addDockWidget(top, self.statusdock)
self.addDockWidget(top, self.commitdock)
if self.browser_dockable:
self.addDockWidget(top, self.browserdock)
self.tabifyDockWidget(self.browserdock, self.commitdock)
self.addDockWidget(top, self.branchdock)
self.addDockWidget(top, self.submodulesdock)
self.addDockWidget(top, self.bookmarksdock)
self.addDockWidget(top, self.recentdock)
self.tabifyDockWidget(self.branchdock, self.submodulesdock)
self.tabifyDockWidget(self.submodulesdock, self.bookmarksdock)
self.tabifyDockWidget(self.bookmarksdock, self.recentdock)
self.branchdock.raise_()
self.addDockWidget(bottom, self.diffdock)
self.addDockWidget(bottom, self.actionsdock)
self.addDockWidget(bottom, self.logdock)
self.tabifyDockWidget(self.actionsdock, self.logdock)
# Listen for model notifications
model.add_observer(model.message_updated, self.updated.emit)
model.add_observer(model.message_mode_changed,
lambda mode: self.updated.emit())
prefs_model.add_observer(prefs_model.message_config_updated,
self._config_updated)
# Set a default value
self.show_cursor_position(1, 0)
self.commit_menu.aboutToShow.connect(self.update_menu_actions)
self.open_recent_menu.aboutToShow.connect(self.build_recent_menu)
self.commiteditor.cursor_changed.connect(self.show_cursor_position)
self.diffeditor.options_changed.connect(self.statuswidget.refresh)
self.diffeditor.up.connect(self.statuswidget.move_up)
self.diffeditor.down.connect(self.statuswidget.move_down)
self.commiteditor.up.connect(self.statuswidget.move_up)
self.commiteditor.down.connect(self.statuswidget.move_down)
self.updated.connect(self.refresh, type=Qt.QueuedConnection)
self.config_actions_changed.connect(self._install_config_actions,
type=Qt.QueuedConnection)
self.init_state(settings, self.set_initial_size)
# Route command output here
Interaction.log_status = self.logwidget.log_status
Interaction.log = self.logwidget.log
# Focus the status widget; this must be deferred
QtCore.QTimer.singleShot(0, self.initialize)
def initialize(self):
context = self.context
git_version = version.git_version_str(context)
if git_version:
ok = True
Interaction.log(git_version + '\n' +
N_('git cola version %s') % version.version())
else:
ok = False
error_msg = N_('error: unable to execute git')
Interaction.log(error_msg)
if ok:
self.statuswidget.setFocus()
else:
title = N_('error: unable to execute git')
msg = title
details = ''
if WIN32:
details = git.win32_git_error_hint()
Interaction.critical(title, message=msg, details=details)
self.context.app.exit(2)
def set_initial_size(self):
# Default size; this is thrown out when save/restore is used
width, height = qtutils.desktop_size()
self.resize((width*3)//4, height)
self.statuswidget.set_initial_size()
self.commiteditor.set_initial_size()
def set_filter(self, txt):
self.statuswidget.set_filter(txt)
# Qt overrides
def closeEvent(self, event):
"""Save state in the settings"""
commit_msg = self.commiteditor.commit_message(raw=True)
self.model.save_commitmsg(msg=commit_msg)
standard.MainWindow.closeEvent(self, event)
def create_view_menu(self):
menu = qtutils.create_menu(N_('View'), self)
self.build_view_menu(menu)
return menu
def build_view_menu(self, menu):
menu.clear()
menu.addAction(self.browse_action)
menu.addAction(self.dag_action)
menu.addSeparator()
popup_menu = self.createPopupMenu()
for menu_action in popup_menu.actions():
menu_action.setParent(menu)
menu.addAction(menu_action)
menu.addSeparator()
context = self.context
menu_action = menu.addAction(
N_('Add Toolbar'), partial(toolbar.add_toolbar, context, self))
menu_action.setIcon(icons.add())
dockwidgets = [
self.logdock,
self.commitdock,
self.statusdock,
self.diffdock,
self.actionsdock,
self.bookmarksdock,
self.recentdock,
self.branchdock,
self.submodulesdock
]
if self.browser_dockable:
dockwidgets.append(self.browserdock)
for dockwidget in dockwidgets:
            # Add the dock toggle action to the view menu
toggleview = dockwidget.toggleViewAction()
menu.addAction(toggleview)
menu.addSeparator()
menu.addAction(self.lock_layout_action)
menu.addAction(self.reset_layout_action)
return menu
def contextMenuEvent(self, event):
menu = self.create_view_menu()
menu.exec_(event.globalPos())
def build_recent_menu(self):
settings = Settings()
settings.load()
cmd = cmds.OpenRepo
context = self.context
menu = self.open_recent_menu
menu.clear()
worktree = self.git.worktree()
for entry in settings.recent:
directory = entry['path']
if directory == worktree:
# Omit the current worktree from the "Open Recent" menu.
continue
name = entry['name']
text = '%s %s %s' % (name, uchr(0x2192), directory)
menu.addAction(text, cmds.run(cmd, context, directory))
# Accessors
mode = property(lambda self: self.model.mode)
def _config_updated(self, _source, config, value):
if config == prefs.FONTDIFF:
# The diff font
font = QtGui.QFont()
if not font.fromString(value):
return
self.logwidget.setFont(font)
self.diffeditor.setFont(font)
self.commiteditor.setFont(font)
elif config == prefs.TABWIDTH:
# This can be set locally or globally, so we have to use the
# effective value otherwise we'll update when we shouldn't.
# For example, if this value is overridden locally, and the
# global value is tweaked, we should not update.
value = prefs.tabwidth(self.context)
self.diffeditor.set_tabwidth(value)
self.commiteditor.set_tabwidth(value)
elif config == prefs.EXPANDTAB:
self.commiteditor.set_expandtab(value)
elif config == prefs.LINEBREAK:
# enables automatic line breaks
self.commiteditor.set_linebreak(value)
elif config == prefs.SORT_BOOKMARKS:
self.bookmarksdock.widget().reload_bookmarks()
elif config == prefs.TEXTWIDTH:
# Use the effective value for the same reason as tabwidth.
value = prefs.textwidth(self.context)
self.commiteditor.set_textwidth(value)
elif config == prefs.SHOW_PATH:
# the path in the window title was toggled
self.refresh_window_title()
def start(self, context):
"""Do the expensive "get_config_actions()" call in the background"""
# Install .git-config-defined actions
task = qtutils.SimpleTask(self, self.get_config_actions)
context.runtask.start(task)
def get_config_actions(self):
actions = cfgactions.get_config_actions(self.context)
self.config_actions_changed.emit(actions)
def _install_config_actions(self, names_and_shortcuts):
"""Install .gitconfig-defined actions"""
if not names_and_shortcuts:
return
context = self.context
menu = self.actions_menu
menu.addSeparator()
for (name, shortcut) in names_and_shortcuts:
callback = cmds.run(cmds.RunConfigAction, context, name)
menu_action = menu.addAction(name, callback)
if shortcut:
menu_action.setShortcut(shortcut)
def refresh(self):
"""Update the title with the current branch and directory name."""
curbranch = self.model.currentbranch
curdir = core.getcwd()
is_merging = self.model.is_merging
is_rebasing = self.model.is_rebasing
msg = N_('Repository: %s') % curdir
msg += '\n'
msg += N_('Branch: %s') % curbranch
if is_rebasing:
msg += '\n\n'
msg += N_('This repository is currently being rebased.\n'
'Resolve conflicts, commit changes, and run:\n'
' Rebase > Continue')
elif is_merging:
msg += '\n\n'
msg += N_('This repository is in the middle of a merge.\n'
'Resolve conflicts and commit changes.')
self.refresh_window_title()
if self.mode == self.model.mode_amend:
self.commit_amend_action.setChecked(True)
else:
self.commit_amend_action.setChecked(False)
self.commitdock.setToolTip(msg)
self.commiteditor.set_mode(self.mode)
self.update_actions()
def refresh_window_title(self):
"""Refresh the window title when state changes"""
alerts = []
project = self.model.project
curbranch = self.model.currentbranch
is_merging = self.model.is_merging
is_rebasing = self.model.is_rebasing
prefix = uchr(0xab)
suffix = uchr(0xbb)
if is_rebasing:
alerts.append(N_('Rebasing'))
elif is_merging:
alerts.append(N_('Merging'))
if self.mode == self.model.mode_amend:
alerts.append(N_('Amending'))
if alerts:
alert_text = (prefix + ' %s ' + suffix + ' ') % ', '.join(alerts)
else:
alert_text = ''
if self.model.cfg.get(prefs.SHOW_PATH, True):
path_text = self.git.worktree()
else:
path_text = ''
title = '%s: %s %s%s' % (project, curbranch, alert_text, path_text)
self.setWindowTitle(title)
def update_actions(self):
is_rebasing = self.model.is_rebasing
self.rebase_group.setEnabled(is_rebasing)
enabled = not self.model.is_empty_repository()
self.rename_branch_action.setEnabled(enabled)
self.delete_branch_action.setEnabled(enabled)
self.annex_init_action.setEnabled(not self.model.annex)
self.lfs_init_action.setEnabled(not self.model.lfs)
def update_menu_actions(self):
# Enable the Prepare Commit Message action if the hook exists
hook = gitcmds.prepare_commit_message_hook(self.context)
enabled = os.path.exists(hook)
self.prepare_commitmsg_hook_action.setEnabled(enabled)
def export_state(self):
state = standard.MainWindow.export_state(self)
show_status_filter = self.statuswidget.filter_widget.isVisible()
state['show_status_filter'] = show_status_filter
state['toolbars'] = self.toolbar_state.export_state()
state['ref_sort'] = self.model.ref_sort
self.diffviewer.export_state(state)
return state
def apply_state(self, state):
"""Imports data for save/restore"""
base_ok = standard.MainWindow.apply_state(self, state)
lock_layout = state.get('lock_layout', False)
self.lock_layout_action.setChecked(lock_layout)
show_status_filter = state.get('show_status_filter', False)
self.statuswidget.filter_widget.setVisible(show_status_filter)
toolbars = state.get('toolbars', [])
self.toolbar_state.apply_state(toolbars)
sort_key = state.get('ref_sort', 0)
self.model.set_ref_sort(sort_key)
diff_ok = self.diffviewer.apply_state(state)
return base_ok and diff_ok
def setup_dockwidget_view_menu(self):
# Hotkeys for toggling the dock widgets
if utils.is_darwin():
optkey = 'Meta'
else:
optkey = 'Ctrl'
dockwidgets = (
(optkey + '+0', self.logdock),
(optkey + '+1', self.commitdock),
(optkey + '+2', self.statusdock),
(optkey + '+3', self.diffdock),
(optkey + '+4', self.actionsdock),
(optkey + '+5', self.bookmarksdock),
(optkey + '+6', self.recentdock),
(optkey + '+7', self.branchdock),
(optkey + '+8', self.submodulesdock)
)
for shortcut, dockwidget in dockwidgets:
# Associate the action with the shortcut
toggleview = dockwidget.toggleViewAction()
toggleview.setShortcut('Shift+' + shortcut)
def showdock(show, dockwidget=dockwidget):
if show:
dockwidget.raise_()
dockwidget.widget().setFocus()
else:
self.setFocus()
self.addAction(toggleview)
qtutils.connect_action_bool(toggleview, showdock)
            # Create a second shortcut, <shortcut>, that gives the dock focus
toggleview = QtWidgets.QAction(self)
toggleview.setShortcut(shortcut)
def focusdock(dockwidget=dockwidget):
focus_dock(dockwidget)
self.addAction(toggleview)
qtutils.connect_action(toggleview, focusdock)
# These widgets warrant home-row hotkey status
qtutils.add_action(self, 'Focus Commit Message',
lambda: focus_dock(self.commitdock),
hotkeys.FOCUS)
qtutils.add_action(self, 'Focus Status Window',
lambda: focus_dock(self.statusdock),
hotkeys.FOCUS_STATUS)
qtutils.add_action(self, 'Focus Diff Editor',
lambda: focus_dock(self.diffdock),
hotkeys.FOCUS_DIFF)
def git_dag(self):
self.dag = dag.git_dag(self.context, existing_view=self.dag)
def show_cursor_position(self, rows, cols):
display = '%02d:%02d' % (rows, cols)
css = """
<style>
.good {
}
.first-warning {
color: black;
background-color: yellow;
}
.second-warning {
color: black;
background-color: #f83;
}
.error {
color: white;
background-color: red;
}
</style>
"""
if cols > 78:
cls = 'error'
elif cols > 72:
cls = 'second-warning'
elif cols > 64:
cls = 'first-warning'
else:
cls = 'good'
div = ('<div class="%s">%s</div>' % (cls, display))
self.position_label.setText(css + div)
class FocusProxy(object):
"""Proxy over child widgets and operate on the focused widget"""
def __init__(self, *widgets):
self.widgets = widgets
self.overrides = {}
def override(self, name, widgets):
self.overrides[name] = widgets
def focus(self, name):
"""Return the currently focused widget"""
widgets = self.overrides.get(name, self.widgets)
# The parent must be the parent of all the proxied widgets
parent = widgets[0]
# The first widget is used as a fallback
fallback = widgets[1]
# We ignore the parent when delegating to child widgets
widgets = widgets[1:]
focus = parent.focusWidget()
if focus not in widgets:
focus = fallback
return focus
def __getattr__(self, name):
"""Return a callback that calls a common child method"""
def callback():
focus = self.focus(name)
fn = getattr(focus, name, None)
if fn:
fn()
return callback
def delete(self):
"""Specialized delete() to deal with QLineEdit vs QTextEdit"""
focus = self.focus('delete')
if hasattr(focus, 'del_'):
focus.del_()
elif hasattr(focus, 'textCursor'):
focus.textCursor().deleteChar()
def show_dock(dockwidget):
dockwidget.raise_()
dockwidget.widget().setFocus()
def focus_dock(dockwidget):
if get(dockwidget.toggleViewAction()):
show_dock(dockwidget)
else:
dockwidget.toggleViewAction().trigger()
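# Hedged usage sketch (editor addition, not part of git-cola): FocusProxy
# routes one action to whichever proxied child widget currently has focus,
# falling back to the first child.  The widget names below are illustrative
# and assume a QApplication is already running.
def _example_focus_proxy():
    parent = QtWidgets.QWidget()
    summary = QtWidgets.QLineEdit(parent)
    description = QtWidgets.QTextEdit(parent)
    proxy = FocusProxy(parent, summary, description)
    summary.setText('subject line')
    summary.selectAll()
    summary.setFocus()
    proxy.copy()  # delegates to summary.copy() because summary holds focus
    return proxy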
| gpl-2.0 | 5,640,615,500,585,334,000 | 37.288407 | 79 | 0.611954 | false |
sid5432/pyOTDR | pyotdr/main.py | 1 | 1399 | import os
import logging
import argparse
from pyotdr.dump import tofile, ExportDataType
from pyotdr.read import sorparse
logging.basicConfig(format="%(message)s")
logger = logging.getLogger(__name__)
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger.setLevel(LOG_LEVEL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("SOR_file", type=str, help="Name of the sor file to transform")
parser.add_argument(
"format",
type=ExportDataType,
choices=list(ExportDataType),
default=ExportDataType.JSON,
help="Output format : JSON or XML",
nargs="?",
)
args = parser.parse_args()
logging.basicConfig(format="%(message)s")
root_logger = logging.getLogger("pyotdr")
root_logger.setLevel(LOG_LEVEL)
filename = args.SOR_file
opformat = ExportDataType(args.format)
_, results, tracedata = sorparse(filename)
# construct data file name to dump results
fn_strip, _ = os.path.splitext(os.path.basename(filename))
datafile = fn_strip + "-dump." + str(opformat).lower()
with open(datafile, "w") as output:
tofile(results, output, format=opformat)
# construct data file name
fn_strip, _ = os.path.splitext(os.path.basename(filename))
opfile = fn_strip + "-trace.dat"
with open(opfile, "w") as output:
for xy in tracedata:
output.write(xy)
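# Hedged usage sketch (editor addition): the programmatic equivalent of the
# CLI flow above.  "example.sor" is an assumed input file name.
def _example_parse(filename="example.sor"):
    _, results, tracedata = sorparse(filename)
    with open("example-dump.json", "w") as output:
        tofile(results, output, format=ExportDataType.JSON)
    return results, tracedata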
| gpl-3.0 | -2,556,162,915,401,240,000 | 28.145833 | 87 | 0.664046 | false |
zhouyao1994/incubator-superset | superset/utils/dashboard_import_export.py | 1 | 1863 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import json
import logging
import time
from superset.models.core import Dashboard
from superset.utils.core import decode_dashboards
def import_dashboards(session, data_stream, import_time=None):
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(data_stream.read(), object_hook=decode_dashboards)
# TODO: import DRUID datasources
for table in data["datasources"]:
type(table).import_obj(table, import_time=import_time)
session.commit()
for dashboard in data["dashboards"]:
Dashboard.import_obj(dashboard, import_time=import_time)
session.commit()
def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info("Starting export")
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:
dashboard_ids.append(dashboard.id)
data = Dashboard.export_dashboards(dashboard_ids)
return data
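# Hedged usage sketch (editor addition, not part of Superset): round-trips
# dashboards through the two helpers above.  The session argument and file
# name are assumptions; Superset normally supplies them from the app context.
def _example_roundtrip(session, path="dashboards.json"):
    with open(path, "w") as handle:
        handle.write(export_dashboards(session))
    with open(path, "rb") as handle:
        import_dashboards(session, handle)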
| apache-2.0 | 5,545,386,890,963,763,000 | 37.8125 | 72 | 0.741278 | false |
RobbieNor/note-extractor | settings.py | 1 | 9569 | #!/usr/bin/python
import ConfigParser, inspect, os, importlib
import stringbar
#Settings
args = {'arg_backuppath' : None, 'arg_show' : None, 'arg_sideratio' : None, 'arg_bordersize' : None, 'arg_osn' : None, 'arg_workspacepath' : None, 'arg_rotateleft' : None, 'arg_rotateright' : None, 'arg_pageheight' : None, 'arg_backup' : None, 'arg_centre' : None, 'arg_rawpath' : None, 'arg_autocrop' : None, 'arg_pagewidth' : None, 'arg_arearatio' : None, 'arg_rawtype' : None, 'arg_portrait' : None, 'arg_landscape' : None, 'arg_fullpage' : None, 'arg_restore' : None, 'arg_delete' : None, 'arg_verbose' : None, 'arg_progpath' : None, 'arg_profile' : None, 'arg_config' : None, 'arg_confpath' : None,'arg_save' : None}
def setConfig(parsed_args, arg_progpath, setverbose = True):
valid_config = True
#Set Runtime Path and Config Path
args["arg_progpath"] = str(arg_progpath)
if 'arg_load' in parsed_args:
if os.path.exists(args['arg_progpath'] + '/config/' + str(parsed_args['arg_load']) + '.conf'):
args['arg_confpath'] = args['arg_progpath'] + '/config/' + str(parsed_args['arg_load']) + '.conf'
else:
stringbar.setVerbose(False)
			return (False,['Requested config file does not exist'],[])
else:
args["arg_confpath"] = str(arg_progpath) + "/config/default.conf"
#Parse given arguments and put in arg dictionary
Config = ConfigParser.ConfigParser()
Config.read(args["arg_confpath"])
dicto = argsToDict(Config)
config_Dict = {}
config_Dict.update(dicto.get("Preferences"))
config_Dict.update(dicto.get("Settings"))
#Make sure all values are evaluated to correct types
for item in config_Dict:
try:
config_Dict[item] = eval(config_Dict[item])
except Exception:
pass
for item in parsed_args:
try:
			parsed_args[item] = eval(parsed_args[item])
except Exception:
pass
#Transfer saved config to arg dictionary
for entry in config_Dict:
args[entry] = config_Dict[entry]
#Check parsed arguments are valid
parsed_issue_list = checkParsedArgs(parsed_args)
if len(parsed_issue_list) != 0:
valid_config = False
#Overwrite config setting with any given arguments
for entry in parsed_args:
if entry == "arg_portrait":
args["arg_landscape"] = None
elif entry == "arg_landscape":
args["arg_portrait"] = None
elif entry == 'arg_rotateright':
args['arg_rotateleft'] = None
elif entry == 'arg_rotateleft':
args['arg_rotateright'] = None
args[entry] = parsed_args[entry]
	#Make sure all args are consistent
#Double check that all args are properly evaluated
for item in args:
try:
if item[-1] == '/':
args[item] = eval(args[item[:-1]])
else:
args[item] = eval(args[item])
except Exception:
pass
#Calculate ratios from given arguments
args["arg_arearatio"] = float((args.get("arg_pageheight")/2 - 2*args.get("arg_bordersize")) * (args.get("arg_pagewidth")/2 - 2*args.get("arg_bordersize")))
args['arg_sideratio'] = float((args['arg_pageheight']))/float(args['arg_pagewidth'])
#Check resulting arguments are valid
final_issue_list = checkFinalArgs()
if len(final_issue_list) != 0:
valid_config = False
if setverbose:
stringbar.setVerbose(args['arg_verbose'])
if args['arg_save'] and len(final_issue_list) == 0 and len(parsed_issue_list) == 0:
saveConfig()
return (valid_config,parsed_issue_list,final_issue_list)
def checkParsedArgs(parsed_args):
#Check given arguments are sane and consistant
parsed_issue_list = []
if "arg_portrait" in parsed_args and "arg_landscape" in parsed_args:
if parsed_args["arg_portrait"] == True and parsed_args["arg_landscape"] == True:
parsed_issue_list.append("Orientation assumed to be both landscape and portrait.")
if "arg_rotateright" in parsed_args and "arg_rotateleft" in parsed_args:
parsed_issue_list.append("Cannot rotate in two directions simultaneously")
for entry in parsed_args:
if entry == "arg_bordersize" or entry == 'arg_pageheight' or entry == 'arg_pagewidth':
if parsed_args[entry] < 0:
parsed_issue_list.append("Negative value given for: " + str(entry))
if parsed_args[entry] == None:
parsed_issue_list.append("Unknown error, entry shouldn't exist")
return parsed_issue_list
def is_mod_function(mod, func):
return inspect.isfunction(func) and inspect.getmodule(func) == mod
def list_functions(mod):
return [func.__name__ for func in mod.__dict__.itervalues()
if is_mod_function(mod, func)]
def checkFinalArgs():
final_issue_list = []
if args["arg_workspacepath"] == None or type(args["arg_workspacepath"]) != str:
final_issue_list.append("arg_workspacepath")
if args["arg_backup"] == None or type(args["arg_backup"]) != bool:
final_issue_list.append("arg_backup")
if args["arg_osn"] == None or type(args["arg_osn"]) != bool:
final_issue_list.append("arg_osn")
if args["arg_show"] == None or type(args["arg_show"]) != bool:
final_issue_list.append("arg_show")
if args["arg_autocrop"] == None or type(args["arg_autocrop"]) != bool:
final_issue_list.append("arg_autocrop")
if args["arg_centre"] == None or type(args["arg_centre"]) != bool:
final_issue_list.append("arg_centre")
#if args["arg_rotate"] == None or type(args["arg_rotate"]) != bool:
# final_issue_list.append("arg_rotate")
if args["arg_sideratio"] == None or args["arg_sideratio"] < 0.1 or type(float(args["arg_sideratio"])) != float:
final_issue_list.append("arg_sideratio")
if args["arg_bordersize"] == None or args["arg_bordersize"] < 0 or type(args["arg_bordersize"]) != int:
final_issue_list.append("arg_bordersize")
if args["arg_fullpage"]:
if 2*args["arg_bordersize"] > min(args["arg_pageheight"],args["arg_pagewidth"]):
final_issue_list.append("Bordersize is greater than smallest potential page size")
else:
if 2*args["arg_bordersize"] > min(args["arg_pageheight"]/2,args["arg_pagewidth"]/2):
final_issue_list.append("Bordersize is greater than smallest potential page size")
if args["arg_pageheight"] == None or args["arg_pageheight"] < 10 or type(args["arg_pageheight"]) != int:
final_issue_list.append("arg_pageheight")
if args["arg_pagewidth"] == None or args["arg_pagewidth"] < 10 or type(args["arg_pagewidth"]) != int:
final_issue_list.append("arg_pagewidth")
if args["arg_portrait"] == True and args["arg_landscape"] == True:
final_issue_list.append("Cannot simultaneously force Portrait and Landscape Orientations")
if args["arg_fullpage"] == None or type(args["arg_fullpage"]) != bool:
final_issue_list.append("arg_fullpage")
if args["arg_profile"] == None:
final_issue_list.append("arg_profile")
if args["arg_backup"] == True and type(args["arg_backuppath"]) != str :
final_issue_list.append("Must specify a backup path when backingup")
if args["arg_verbose"] == None:
final_issue_list.append("arg_verbose")
#if args["arg_delete"] == None:
# final_issue_list.append("arg_delete")
if args["arg_rotateright"] == True and args["arg_rotateleft"] == True:
final_issue_list.append("Cannot rotate in two directions simultaneously")
#Check Profile validity
profile_list = [os.path.splitext(os.path.join(dp, f))[0].split('/')[-1] for dp, dn, filenames in os.walk(args["arg_progpath"] + "/profiles/") for f in filenames if os.path.splitext(f)[1] == '.py']
args['arg_profile'] = str(args['arg_profile'])
if args["arg_profile"] in profile_list:
profile = importlib.import_module('profiles.' + args["arg_profile"])
if "convertRawToSVG" and "pullRawData" and "clearRawData" in list_functions(profile):
try:
args['arg_rawtype'] = profile.arg_rawtype
args['arg_rawpath'] = profile.arg_rawpath
except Exception:
final_issue_list.append("Profile does not contain all required attributes (arg_rawtype, arg_rawpath)")
else:
final_issue_list.append("Profile does not contain all required methods (convertRawToSVG, pullRawData, clearRawData)")
else:
final_issue_list.append("Profile does not exist")
return final_issue_list
def argsToDict(Config):
d = dict(Config._sections)
for k in d:
d[k] = dict(Config._defaults, **d[k])
d[k].pop('__name__', None)
return d
def manuallyConfigure(arg_progpath):
os.system('nohup xdg-open ' + arg_progpath + "/config/saved.conf > /dev/null 2>&1 &")
def viewLogFile(arg_progpath):
os.system('xdg-open ' + arg_progpath + "/.log > /dev/null 2>&1 &")
#logfile = open(arg_progpath + '/.log', 'r')
#log = logfile.readlines()
#for line in log:
# print line,
def viewNotes(parsed_args,arg_progpath):
setConfig(parsed_args, arg_progpath, False)
os.system("nohup xdg-open " + args['arg_workspacepath'] + "/Images/1.png > /dev/null 2>&1 &")
def clearRaw(parsed_args, arg_progpath):
setConfig(parsed_args, arg_progpath, False)
profile = importlib.import_module('profiles.' + args['arg_profile'])
profile.clearRawData(0,False)
def saveConfig():
newconf_path = args['arg_progpath'] + '/config/' + args['arg_save'] + '.conf'
prefs_to_save = ['arg_autocrop','arg_backup','arg_bordersize','arg_centre','arg_rotateleft','arg_rotateright','arg_portrait','arg_landscape','arg_fullpage','arg_osn','arg_show','arg_verbose']
setts_to_save = ['arg_profile','arg_workspacepath','arg_backuppath','arg_pageheight','arg_pagewidth']
if os.path.exists(newconf_path):
os.remove(newconf_path)
new_conf = file(newconf_path,'w')
new_conf.write('[Settings]\n')
for conf in setts_to_save:
new_conf.write(conf + ' = ' + str(args[conf]) + '\n')
new_conf.write('\n[Preferences]\n')
for conf in prefs_to_save:
new_conf.write(conf + ' = ' + str(args[conf]) + '\n')
new_conf.close()
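#Example driver (editor addition): a hedged sketch assuming config/default.conf
#exists under the program path and that no command-line overrides are given.
def exampleSetConfig():
	example_progpath = os.path.dirname(os.path.abspath(__file__))
	ok, parsed_issues, final_issues = setConfig({}, example_progpath)
	if not ok:
		print "Configuration problems: " + str(parsed_issues + final_issues)
	return ok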
| gpl-2.0 | 5,031,598,925,003,352,000 | 45.906863 | 621 | 0.685965 | false |
inorton/junit2html | junit2htmlreport/matrix.py | 1 | 7952 | """
Handle multiple parsed junit reports
"""
from __future__ import unicode_literals
import os
from . import parser
from .common import ReportContainer
from .parser import SKIPPED, FAILED, PASSED, ABSENT
from .render import HTMLMatrix, HTMLReport
UNTESTED = "untested"
PARTIAL_PASS = "partial pass"
PARTIAL_FAIL = "partial failure"
TOTAL_FAIL = "total failure"
class ReportMatrix(ReportContainer):
"""
Load and handle several report files
"""
def __init__(self):
super(ReportMatrix, self).__init__()
self.cases = {}
self.classes = {}
self.casenames = {}
self.result_stats = {}
self.case_results = {}
def add_case_result(self, case):
testclass = case.testclass.name
casename = case.name
if testclass not in self.case_results:
self.case_results[testclass] = {}
if casename not in self.case_results[testclass]:
self.case_results[testclass][casename] = []
self.case_results[testclass][casename].append(case.outcome())
def report_order(self):
return sorted(self.reports.keys())
def short_outcome(self, outcome):
if outcome == PASSED:
return "/"
elif outcome == SKIPPED:
return "s"
elif outcome == FAILED:
return "f"
elif outcome == TOTAL_FAIL:
return "F"
elif outcome == PARTIAL_PASS:
return "%"
elif outcome == PARTIAL_FAIL:
return "X"
elif outcome == UNTESTED:
return "U"
return "?"
def add_report(self, filename):
"""
Load a report into the matrix
:param filename:
:return:
"""
parsed = parser.Junit(filename=filename)
filename = os.path.basename(filename)
self.reports[filename] = parsed
for suite in parsed.suites:
for testclass in suite.classes:
if testclass not in self.classes:
self.classes[testclass] = {}
if testclass not in self.casenames:
self.casenames[testclass] = list()
self.classes[testclass][filename] = suite.classes[testclass]
for testcase in self.classes[testclass][filename].cases:
name = testcase.name.strip()
if name not in self.casenames[testclass]:
self.casenames[testclass].append(name)
if testclass not in self.cases:
self.cases[testclass] = {}
if name not in self.cases[testclass]:
self.cases[testclass][name] = {}
self.cases[testclass][name][filename] = testcase
outcome = testcase.outcome()
self.add_case_result(testcase)
self.result_stats[outcome] = 1 + self.result_stats.get(
outcome, 0)
def summary(self):
"""
Render a summary of the matrix
:return:
"""
raise NotImplementedError()
def combined_result_list(self, classname, casename):
"""
        Combine the result of all instances of the given case
:param classname:
:param casename:
:return:
"""
if classname in self.case_results:
if casename in self.case_results[classname]:
results = self.case_results[classname][casename]
return self.combined_result(results)
return " ", ""
def combined_result(self, results):
"""
Given a list of results, produce a "combined" overall result
:param results:
:return:
"""
if results:
if PASSED in results:
if FAILED in results:
return self.short_outcome(PARTIAL_FAIL), PARTIAL_FAIL.title()
return self.short_outcome(PASSED), PASSED.title()
if FAILED in results:
return self.short_outcome(FAILED), FAILED.title()
if SKIPPED in results:
return self.short_outcome(UNTESTED), UNTESTED.title()
return " ", ""
class HtmlReportMatrix(ReportMatrix):
"""
Render a matrix report as html
"""
def __init__(self, outdir):
super(HtmlReportMatrix, self).__init__()
self.outdir = outdir
def add_report(self, filename):
"""
Load a report
"""
super(HtmlReportMatrix, self).add_report(filename)
basename = os.path.basename(filename)
# make the individual report too
report = self.reports[basename].html()
if self.outdir != "" and not os.path.exists(self.outdir):
os.makedirs(self.outdir)
with open(
os.path.join(self.outdir, basename) + ".html", "wb") as filehandle:
filehandle.write(report.encode("utf-8"))
def short_outcome(self, outcome):
if outcome == PASSED:
return "ok"
return super(HtmlReportMatrix, self).short_outcome(outcome)
def short_axis(self, axis):
if axis.endswith(".xml"):
return axis[:-4]
return axis
def summary(self):
"""
Render the html
:return:
"""
html_matrix = HTMLMatrix(self)
return str(html_matrix)
class TextReportMatrix(ReportMatrix):
"""
Render a matrix report as text
"""
def summary(self):
"""
Render as a string
:return:
"""
output = "\nMatrix Test Report\n"
output += "===================\n"
axis = list(self.reports.keys())
axis.sort()
# find the longest classname or test case name
left_indent = 0
for classname in self.classes:
left_indent = max(len(classname), left_indent)
for casename in self.casenames[classname]:
left_indent = max(len(casename), left_indent)
# render the axis headings in a stepped tree
treelines = ""
for filename in self.report_order():
output += "{} {}{}\n".format(" " * left_indent, treelines,
filename)
treelines += "| "
output += "{} {}\n".format(" " * left_indent, treelines)
# render in groups of the same class
for classname in self.classes:
# new class
output += "{} \n".format(classname)
# print the case name
for casename in sorted(set(self.casenames[classname])):
output += "- {}{} ".format(casename,
" " * (left_indent - len(casename)))
# print each test and its result for each axis
case_data = ""
for axis in self.report_order():
if axis not in self.cases[classname][casename]:
case_data += " "
else:
testcase = self.cases[classname][casename][axis]
if testcase.skipped:
case_data += "s "
elif testcase.failure:
case_data += "f "
else:
case_data += "/ "
combined, combined_name = self.combined_result(
self.case_results[classname][testcase.name])
output += case_data
output += " {} {}\n".format(combined, combined_name)
# print the result stats
output += "\n"
output += "-" * 79
output += "\n"
output += "Test Results:\n"
for outcome in sorted(self.result_stats):
output += " {:<12} : {:>6}\n".format(
outcome.title(),
self.result_stats[outcome])
return output
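# Hedged usage sketch (editor addition): builds both matrix flavours from a
# list of junit XML paths.  The paths and output directory are assumptions;
# junit2htmlreport normally drives this from its command-line entry point.
def _example_matrix(report_paths, outdir="matrix-report"):
    text_matrix = TextReportMatrix()
    html_matrix = HtmlReportMatrix(outdir)
    for path in report_paths:
        text_matrix.add_report(path)
        html_matrix.add_report(path)
    with open(os.path.join(outdir, "matrix.html"), "wb") as handle:
        handle.write(html_matrix.summary().encode("utf-8"))
    return text_matrix.summary()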
| mit | -5,829,537,178,853,907,000 | 30.43083 | 83 | 0.522384 | false |
Andy-hpliu/AirtestX | atx/__init__.py | 1 | 1668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module is to make mobile test more easily
"""
from __future__ import absolute_import
import os
import sys
import signal
import pkg_resources
try:
version = pkg_resources.get_distribution("atx").version
except pkg_resources.DistributionNotFound:
version = 'unknown'
from atx.consts import *
from atx.errors import *
from atx.device import Pattern, Bounds
def connect(*args, **kwargs):
"""Connect to a device, and return its object
Args:
platform: string one of <android|ios|windows>
Returns:
        The device object for the requested platform
Raises:
SyntaxError, EnvironmentError
"""
platform = kwargs.pop('platform', os.getenv('ATX_PLATFORM') or 'android')
cls = None
if platform == 'android':
os.environ['JSONRPC_TIMEOUT'] = "10" # default is 90s which is too long.
devcls = __import__('atx.device.android')
cls = devcls.device.android.AndroidDevice
elif platform == 'windows':
devcls = __import__('atx.device.windows')
cls = devcls.device.windows.WindowsDevice
elif platform == 'ios':
devcls = __import__('atx.device.ios_webdriveragent')
cls = devcls.device.ios_webdriveragent.IOSDevice
elif platform == 'dummy': # for py.test use
devcls = __import__('atx.device.dummy')
cls = devcls.device.dummy.DummyDevice
if cls is None:
raise SyntaxError('Platform: %s not exists' % platform)
c = cls(*args, **kwargs)
return c
# def _sig_handler(signum, frame):
# print >>sys.stderr, 'Signal INT catched !!!'
# sys.exit(1)
# signal.signal(signal.SIGINT, _sig_handler)
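# Hedged usage sketch (editor addition): the serial number is a placeholder
# taken from `adb devices`; the screenshot call assumes the returned device
# object exposes screenshot(), as the ATX device classes do.
def _example_connect(serialno='0123456789ABCDEF'):
    d = connect(serialno, platform='android')
    d.screenshot('screen.png')
    return d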
| apache-2.0 | -3,891,371,900,709,590,500 | 26.344262 | 80 | 0.641487 | false |
Azure/azure-sdk-for-python | sdk/maintenance/azure-mgmt-maintenance/azure/mgmt/maintenance/operations/_configuration_assignments_operations.py | 1 | 28766 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConfigurationAssignmentsOperations(object):
"""ConfigurationAssignmentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.maintenance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update_parent(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_parent_type, # type: str
resource_parent_name, # type: str
resource_type, # type: str
resource_name, # type: str
configuration_assignment_name, # type: str
configuration_assignment, # type: "_models.ConfigurationAssignment"
**kwargs # type: Any
):
# type: (...) -> "_models.ConfigurationAssignment"
"""Create configuration assignment.
Register configuration for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_parent_type: Resource parent type.
:type resource_parent_type: str
:param resource_parent_name: Resource parent identifier.
:type resource_parent_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param configuration_assignment_name: Configuration assignment name.
:type configuration_assignment_name: str
:param configuration_assignment: The configurationAssignment.
:type configuration_assignment: ~azure.mgmt.maintenance.models.ConfigurationAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ConfigurationAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigurationAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_parent.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'configurationAssignmentName': self._serialize.url("configuration_assignment_name", configuration_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(configuration_assignment, 'ConfigurationAssignment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigurationAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments/{configurationAssignmentName}'} # type: ignore
def delete_parent(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_parent_type, # type: str
resource_parent_name, # type: str
resource_type, # type: str
resource_name, # type: str
configuration_assignment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ConfigurationAssignment"]
"""Unregister configuration for resource.
Unregister configuration for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_parent_type: Resource parent type.
:type resource_parent_type: str
:param resource_parent_name: Resource parent identifier.
:type resource_parent_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param configuration_assignment_name: Unique configuration assignment name.
:type configuration_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ConfigurationAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ConfigurationAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.delete_parent.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'configurationAssignmentName': self._serialize.url("configuration_assignment_name", configuration_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConfigurationAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments/{configurationAssignmentName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_type, # type: str
resource_name, # type: str
configuration_assignment_name, # type: str
configuration_assignment, # type: "_models.ConfigurationAssignment"
**kwargs # type: Any
):
# type: (...) -> "_models.ConfigurationAssignment"
"""Create configuration assignment.
Register configuration for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param configuration_assignment_name: Configuration assignment name.
:type configuration_assignment_name: str
:param configuration_assignment: The configurationAssignment.
:type configuration_assignment: ~azure.mgmt.maintenance.models.ConfigurationAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ConfigurationAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConfigurationAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'configurationAssignmentName': self._serialize.url("configuration_assignment_name", configuration_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(configuration_assignment, 'ConfigurationAssignment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigurationAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments/{configurationAssignmentName}'} # type: ignore
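    # --- Usage sketch (editor's addition, not part of the generated code) --------
    # A hedged example of calling create_or_update through the generated client;
    # the client class name, credential helper and all resource values below are
    # illustrative assumptions rather than facts taken from this file.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.maintenance import MaintenanceManagementClient
    #   from azure.mgmt.maintenance.models import ConfigurationAssignment
    #   client = MaintenanceManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   assignment = client.configuration_assignments.create_or_update(
    #       resource_group_name="my-rg", provider_name="Microsoft.Compute",
    #       resource_type="virtualMachines", resource_name="my-vm",
    #       configuration_assignment_name="my-assignment",
    #       configuration_assignment=ConfigurationAssignment(
    #           maintenance_configuration_id="<maintenance-configuration-resource-id>"))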
def delete(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_type, # type: str
resource_name, # type: str
configuration_assignment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ConfigurationAssignment"]
"""Unregister configuration for resource.
Unregister configuration for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param configuration_assignment_name: Unique configuration assignment name.
:type configuration_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ConfigurationAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ConfigurationAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'configurationAssignmentName': self._serialize.url("configuration_assignment_name", configuration_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConfigurationAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments/{configurationAssignmentName}'} # type: ignore
def list_parent(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_parent_type, # type: str
resource_parent_name, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListConfigurationAssignmentsResult"]
"""List configurationAssignments for resource.
List configurationAssignments for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_parent_type: Resource parent type.
:type resource_parent_type: str
:param resource_parent_name: Resource parent identifier.
:type resource_parent_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListConfigurationAssignmentsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.maintenance.models.ListConfigurationAssignmentsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListConfigurationAssignmentsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_parent.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListConfigurationAssignmentsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments'} # type: ignore
def list(
self,
resource_group_name, # type: str
provider_name, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListConfigurationAssignmentsResult"]
"""List configurationAssignments for resource.
List configurationAssignments for resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListConfigurationAssignmentsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.maintenance.models.ListConfigurationAssignmentsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListConfigurationAssignmentsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListConfigurationAssignmentsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/configurationAssignments'} # type: ignore
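    # --- Usage sketch (editor's addition) -----------------------------------------
    # list()/list_parent() return an ItemPaged iterator, so assignments can be
    # walked lazily; client construction is assumed as in the sketch above.
    #
    #   for assignment in client.configuration_assignments.list(
    #           resource_group_name="my-rg", provider_name="Microsoft.Compute",
    #           resource_type="virtualMachines", resource_name="my-vm"):
    #       print(assignment.name)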
| mit | -2,900,234,072,373,119,500 | 50.551971 | 316 | 0.651846 | false |
aburnap/JMD2015-When-Crowdsourcing-Fails | human_crowds/model.py | 1 | 4057 | #-----------------------------------------------------------------------------
#
# Paper: When Crowdsourcing Fails: A Study of Expertise on Crowdsourced
# Design Evaluation
# Author: Alex Burnap - [email protected]
# Date: October 10, 2014
# License: Apache v2
# Description: Model definition for creating Bayesian network crowd
# consensus model
#
#-----------------------------------------------------------------------------
import numpy as np
import pymc
import scipy.stats as stats
def create_model(evaluation_matrix, num_participants, num_designs):
"""
Function creates Bayesian network model defition as dict for PyMC, called
by simulation_X.py.
Input: evaluation matrix -
Output: Dict of PyMC
Note: Current hyperparameters are hard coded as in paper
"""
#--------------- Data Manipulation of Evaluation Matrix-------------------
indices = np.nonzero(evaluation_matrix)
participant_indices, design_indices = indices[0], indices[1]
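    # flatten the matrix, drop missing (zero-valued) entries, and rescale the 1-5 ratings to [0, 1]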
observed_evaluations = evaluation_matrix.reshape(num_participants*num_designs)
observed_evaluations = np.ma.masked_equal(observed_evaluations,0).compressed()
observed_evaluations = (observed_evaluations-1)/4.0
#--- 1st Level --- Hyperparameters of Priors -----------------------------
ability_mu_prior = 0.5
ability_tau_prior = 0.1
logistic_scale_mu = 0.07
logistic_scale_tau = 1.0
criteria_score_mu_prior = 0.5
criteria_score_tau_prior = 0.1
#--- 2nd Level --- Ability, Difficulty, Logistic Scale, Inv-Wishart Var --
"""
Currently only each participant has it's own node, there is common node
for difficulty, logistic scale, and inv_wishart_var
"""
ability_vector = pymc.TruncatedNormal('ability', mu=ability_mu_prior,
tau=ability_tau_prior, a=0, b=1, value=.5*np.ones(num_participants))
design_difficulty_num = pymc.TruncatedNormal('design_difficulty',
mu=0.5, tau=1.0, a=0.3, b=0.7, value=0.5)
logistic_scale_num = pymc.TruncatedNormal('logistic_scale', mu=logistic_scale_mu,
tau=logistic_scale_tau, a=.01, b=.2, value=.07)#, value=.1*np.ones(num_participants))
inv_gamma_var = .01 # turn this to density later
#--- 3rd Level ---- Logistic, Alpha, Beta Deterministic ------------------
@pymc.deterministic
def logistic_det(ability=ability_vector, difficulty=design_difficulty_num, scale=logistic_scale_num):
sigma = np.array(1 - stats.logistic.cdf(ability-difficulty,0,scale)).clip(
np.spacing(1)*10, 1e6) #this is done to prevent dividing by 0
return sigma
@pymc.deterministic
def alpha_det(E=logistic_det, V=inv_gamma_var):
return (E**2)/V + 2
@pymc.deterministic
def beta_det(E=logistic_det, V=inv_gamma_var):
return (E*((E**2)/V + 1))
#--- 4th Level --- Inverse-Gamma and True Score --------------------------
criteria_score_vector = pymc.TruncatedNormal('criteria_score', mu=criteria_score_mu_prior,
tau=criteria_score_tau_prior, a=0, b=1, value=.5*np.ones(num_designs))
inverse_gamma_vector = pymc.InverseGamma('inverse_gamma', alpha=alpha_det, beta=beta_det,
value=0.5*np.ones(num_participants))
#--- 5th Level ---- Evaluations -------------------------------
y = pymc.TruncatedNormal('y', mu=criteria_score_vector[design_indices],
tau=1/(inverse_gamma_vector[participant_indices]**2),
a=0, b=1, value=observed_evaluations, observed=True)
#--- Return All MCMC Objects ---------------------------------------------
return {'y':y ,
'criteria_score_vector': criteria_score_vector,
'inverse_gamma_vector': inverse_gamma_vector,
'alpha_det': alpha_det,
'beta_det': beta_det,
'logistic_det': logistic_det,
'logistic_scale_num': logistic_scale_num,
'ability_vector':ability_vector,
'design_difficulty_num':design_difficulty_num}
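# --- Usage sketch (editor's addition) -----------------------------------------
# An assumed example of sampling from the returned nodes with PyMC 2; the
# evaluation matrix is synthetic and only illustrates the expected shape
# (ratings 1-5, zeros marking designs a participant did not evaluate).
#
#   import numpy as np
#   import pymc
#   num_participants, num_designs = 10, 5
#   evaluation_matrix = np.random.randint(0, 6, size=(num_participants, num_designs))
#   nodes = create_model(evaluation_matrix, num_participants, num_designs)
#   mcmc = pymc.MCMC(nodes)
#   mcmc.sample(iter=20000, burn=5000)
#   consensus_scores = mcmc.trace('criteria_score')[:].mean(axis=0)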
| mit | 5,495,789,919,888,849,000 | 41.705263 | 106 | 0.597486 | false |
prosodylab/Prosodylab-Aligner | aligner/__main__.py | 1 | 6425 | # Copyright (c) 2011-2014 Kyle Gorman and Michael Wagner
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Command-line driver for the module
"""
import logging
import os
import sys
import yaml
from bisect import bisect
from shutil import copyfile
from textgrid import MLF
from .corpus import Corpus
from .aligner import Aligner
from .archive import Archive
from .utilities import splitname, resolve_opts, \
ALIGNED, CONFIG, HMMDEFS, MACROS, SCORES
from argparse import ArgumentParser
DICTIONARY = "eng.dict"
MODEL = "eng.zip"
LOGGING_FMT = "%(message)s"
# parse arguments
argparser = ArgumentParser(prog="{} -m aligner".format(sys.executable),
description="Prosodylab-Aligner")
argparser.add_argument("-c", "--configuration",
help="config file")
argparser.add_argument("-d", "--dictionary", metavar="DICT", action="append",
help="dictionary file (default: {}) (can specify multiple)".format(DICTIONARY))
argparser.add_argument("-s", "--samplerate", type=int,
help="analysis samplerate (in Hz)")
argparser.add_argument("-e", "--epochs", type=int,
help="# of epochs of training per round")
input_group = argparser.add_argument_group()
input_group.add_argument("-r", "--read",
help="source for a precomputed acoustic model")
input_group.add_argument("-t", "--train",
help="directory containing data for training")
output_group = argparser.add_mutually_exclusive_group(required=True)
output_group.add_argument("-a", "--align",
help="directory containing data to align")
output_group.add_argument("-w", "--write",
help="destination for computed acoustic model")
verbosity_group = argparser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v", "--verbose", action="store_true",
help="Verbose output")
verbosity_group.add_argument("-V", "--extra-verbose", action="store_true",
help="Even more verbose output")
args = argparser.parse_args()
# hack to allow proper override of default dictionary
if not args.dictionary:
args.dictionary = [DICTIONARY]
# set up logging
loglevel = logging.WARNING
if args.extra_verbose:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
logging.basicConfig(format=LOGGING_FMT, level=loglevel)
# input: pick one
if args.train:
if args.read:
logging.error("Cannot train on persistent model.")
exit(1)
logging.info("Preparing corpus '{}'.".format(args.train))
opts = resolve_opts(args)
corpus = Corpus(args.train, opts)
logging.info("Preparing aligner.")
aligner = Aligner(opts)
logging.info("Training aligner on corpus '{}'.".format(args.train))
aligner.HTKbook_training_regime(corpus, opts["epochs"],
flatstart=(args.read is None))
else:
if not args.read:
args.read = MODEL
logging.info("Reading aligner from '{}'.".format(args.read))
# warn about irrelevant flags
if args.configuration:
logging.warning("Ignoring config flag (-c/--configuration).")
args.configuration = None
if args.epochs:
logging.warning("Ignoring epochs flag (-e/--epochs).")
if args.samplerate:
logging.warning("Ignoring samplerate flag (-s/--samplerate).")
args.samplerate = None
# create archive from -r argument
archive = Archive(args.read)
# read configuration file therefrom, and resolve options with it
args.configuration = os.path.join(archive.dirname, CONFIG)
opts = resolve_opts(args)
# initialize aligner and set it to point to the archive data
aligner = Aligner(opts)
aligner.curdir = archive.dirname
# output: pick one
if args.align:
# check to make sure we're not aligning on the training data
if (not args.train) or (os.path.realpath(args.train) !=
os.path.realpath(args.align)):
logging.info("Preparing corpus '{}'.".format(args.align))
corpus = Corpus(args.align, opts)
logging.info("Aligning corpus '{}'.".format(args.align))
aligned = os.path.join(args.align, ALIGNED)
scores = os.path.join(args.align, SCORES)
aligner.align_and_score(corpus, aligned, scores)
logging.debug("Wrote MLF file to '{}'.".format(aligned))
logging.debug("Wrote likelihood scores to '{}'.".format(scores))
logging.info("Writing TextGrids.")
size = MLF(aligned).write(args.align)
if not size:
logging.error("No paths found!")
exit(1)
logging.debug("Wrote {} TextGrids.".format(size))
elif args.write:
# create and populate archive
(_, basename, _) = splitname(args.write)
archive = Archive.empty(basename)
archive.add(os.path.join(aligner.curdir, HMMDEFS))
archive.add(os.path.join(aligner.curdir, MACROS))
# whatever this is, it's not going to work once you move the data
if "dictionary" in opts:
del opts["dictionary"]
with open(os.path.join(archive.dirname, CONFIG), "w") as sink:
yaml.dump(opts, sink)
(basename, _) = os.path.splitext(args.write)
archive_path = os.path.relpath(archive.dump(basename))
logging.info("Wrote aligner to '{}'.".format(archive_path))
# else unreachable
logging.info("Success!")
| mit | -8,438,024,029,651,108,000 | 39.408805 | 102 | 0.674241 | false |
PaloAltoNetworks/minemeld-core | minemeld/chassis.py | 1 | 6761 | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minemeld.chassis
A chassis instance contains a list of nodes and a fabric.
Nodes communicate using the fabric.
"""
import os
import logging
import gevent
import gevent.queue
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import minemeld.mgmtbus
import minemeld.ft
import minemeld.fabric
LOG = logging.getLogger(__name__)
STATE_REPORT_INTERVAL = 10
class Chassis(object):
"""Chassis class
Args:
fabricclass (str): class for the fabric
fabricconfig (dict): config dictionary for fabric,
class specific
mgmtbusconfig (dict): config dictionary for mgmt bus
"""
def __init__(self, fabricclass, fabricconfig, mgmtbusconfig):
self.chassis_id = os.getpid()
self.fts = {}
self.poweroff = gevent.event.AsyncResult()
self.fabric_class = fabricclass
self.fabric_config = fabricconfig
self.fabric = minemeld.fabric.factory(
self.fabric_class,
self,
self.fabric_config
)
self.mgmtbus = minemeld.mgmtbus.slave_hub_factory(
mgmtbusconfig['slave'],
mgmtbusconfig['transport']['class'],
mgmtbusconfig['transport']['config']
)
self.mgmtbus.add_failure_listener(self.mgmtbus_failed)
self.mgmtbus.request_chassis_rpc_channel(self)
self.log_channel_queue = gevent.queue.Queue(maxsize=128)
self.log_channel = self.mgmtbus.request_log_channel()
self.log_glet = None
self.status_channel_queue = gevent.queue.Queue(maxsize=128)
self.status_glet = None
def _dynamic_load(self, classname):
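        # Import a dotted "module.ClassName" string and return the class object;
        # used to load node classes referenced by name in the chassis config.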
modname, classname = classname.rsplit('.', 1)
imodule = __import__(modname, globals(), locals(), [classname])
cls = getattr(imodule, classname)
return cls
def get_ft(self, ftname):
return self.fts.get(ftname, None)
def configure(self, config):
"""configures the chassis instance
Args:
config (list): list of FTs
"""
newfts = {}
for ft in config:
ftconfig = config[ft]
LOG.debug(ftconfig)
# new FT
newfts[ft] = minemeld.ft.factory(
ftconfig['class'],
name=ft,
chassis=self,
config=ftconfig.get('config', {})
)
newfts[ft].connect(
ftconfig.get('inputs', []),
ftconfig.get('output', False)
)
self.fts = newfts
# XXX should be moved to constructor
self.mgmtbus.start()
self.fabric.start()
self.mgmtbus.send_master_rpc(
'chassis_ready',
params={'chassis_id': self.chassis_id},
timeout=10
)
def request_mgmtbus_channel(self, ft):
self.mgmtbus.request_channel(ft)
def request_rpc_channel(self, ftname, ft, allowed_methods=None):
if allowed_methods is None:
allowed_methods = []
self.fabric.request_rpc_channel(ftname, ft, allowed_methods)
def request_pub_channel(self, ftname):
return self.fabric.request_pub_channel(ftname)
def request_sub_channel(self, ftname, ft, subname, allowed_methods=None):
if allowed_methods is None:
allowed_methods = []
self.fabric.request_sub_channel(ftname, ft, subname, allowed_methods)
def send_rpc(self, sftname, dftname, method, params, block, timeout):
return self.fabric.send_rpc(sftname, dftname, method, params,
block=block, timeout=timeout)
def _log_actor(self):
while True:
try:
params = self.log_channel_queue.get()
self.log_channel.publish(
method='log',
params=params
)
except Exception:
LOG.exception('Error sending log')
def log(self, timestamp, nodename, log_type, value):
self.log_channel_queue.put({
'timestamp': timestamp,
'source': nodename,
'log_type': log_type,
'log': value
})
def _status_actor(self):
while True:
try:
params = self.status_channel_queue.get()
self.mgmtbus.send_status(
params=params
)
except Exception:
LOG.exception('Error publishing status')
def publish_status(self, timestamp, nodename, status):
self.status_channel_queue.put({
'timestamp': timestamp,
'source': nodename,
'status': status
})
def fabric_failed(self):
self.stop()
def mgmtbus_failed(self):
LOG.critical('chassis - mgmtbus failed')
self.stop()
def mgmtbus_start(self):
LOG.info('chassis - start received from mgmtbus')
self.start()
return 'ok'
def fts_init(self):
for ft in self.fts.values():
if ft.get_state() < minemeld.ft.ft_states.INIT:
return False
return True
def stop(self):
LOG.info("chassis stop called")
if self.log_glet is not None:
self.log_glet.kill()
if self.status_glet is not None:
self.status_glet.kill()
if self.fabric is None:
return
for ftname, ft in self.fts.iteritems():
try:
ft.stop()
except:
LOG.exception('Error stopping {}'.format(ftname))
LOG.info('Stopping fabric')
self.fabric.stop()
LOG.info('Stopping mgmtbus')
self.mgmtbus.stop()
LOG.info('chassis - stopped')
self.poweroff.set(value='stop')
def start(self):
LOG.info("chassis start called")
self.log_glet = gevent.spawn(self._log_actor)
self.status_glet = gevent.spawn(self._status_actor)
for ftname, ft in self.fts.iteritems():
LOG.debug("starting %s", ftname)
ft.start()
self.fabric.start_dispatching()
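# --- Usage sketch (editor's addition) -----------------------------------------
# A heavily hedged sketch of wiring up a chassis; the fabric/transport class name
# and the (empty) config dictionaries are placeholders, not values taken from this
# module, and a real deployment would pass its node graph to configure().
#
#   chassis = Chassis(
#       fabricclass='ZMQRedis',          # placeholder class path
#       fabricconfig={},
#       mgmtbusconfig={'slave': {}, 'transport': {'class': 'ZMQRedis', 'config': {}}})
#   chassis.configure({})                # {node_name: {'class': ..., 'inputs': [...], 'output': bool}}
#   chassis.poweroff.wait()              # block until the chassis shuts down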
| apache-2.0 | 8,715,804,090,073,838,000 | 28.017167 | 77 | 0.580388 | false |
myDevicesIoT/Cayenne-Agent | myDevices/devices/digital/__init__.py | 1 | 4181 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from myDevices.decorators.rest import request, response
from myDevices.utils.types import M_JSON
class GPIOPort():
IN = 0
OUT = 1
LOW = False
HIGH = True
def __init__(self, channelCount):
self.digitalChannelCount = channelCount
def checkDigitalChannel(self, channel):
if not 0 <= channel < self.digitalChannelCount:
raise ValueError("Channel %d out of range [%d..%d]" % (channel, 0, self.digitalChannelCount-1))
def checkDigitalValue(self, value):
if not (value == 0 or value == 1):
raise ValueError("Value %d not in {0, 1}")
@response("%d")
def digitalCount(self):
return self.digitalChannelCount
def __family__(self):
return "GPIOPort"
def __getFunction__(self, channel):
raise NotImplementedError
def __setFunction__(self, channel, func):
raise NotImplementedError
def __digitalRead__(self, channel):
raise NotImplementedError
def __portRead__(self):
raise NotImplementedError
def __digitalWrite__(self, channel, value):
raise NotImplementedError
def __portWrite__(self, value):
raise NotImplementedError
def getFunction(self, channel):
self.checkDigitalChannel(channel)
return self.__getFunction__(channel)
def getFunctionString(self, channel):
func = self.getFunction(channel)
if func == self.IN:
return "IN"
elif func == self.OUT:
return "OUT"
# elif func == GPIO.PWM:
# return "PWM"
else:
return "UNKNOWN"
def setFunction(self, channel, value):
self.checkDigitalChannel(channel)
self.__setFunction__(channel, value)
return self.__getFunction__(channel)
def setFunctionString(self, channel, value):
value = value.lower()
if value == "in":
self.setFunction(channel, self.IN)
elif value == "out":
self.setFunction(channel, self.OUT)
# elif value == "pwm":
# self.setFunction(channel, GPIO.PWM)
else:
raise ValueError("Bad Function")
return self.getFunctionString(channel)
@response("%d")
def digitalRead(self, channel):
self.checkDigitalChannel(channel)
return self.__digitalRead__(channel)
@response(contentType=M_JSON)
def wildcard(self, compact=False):
if compact:
f = "f"
v = "v"
else:
f = "function"
v = "value"
values = {}
for i in range(self.digitalChannelCount):
if compact:
func = self.__getFunction__(i)
else:
func = self.getFunctionString(i)
values[i] = {f: func, v: int(self.digitalRead(i))}
return values
@response("%d")
def portRead(self):
return self.__portRead__()
@response("%d")
def digitalWrite(self, channel, value):
self.checkDigitalChannel(channel)
self.checkDigitalValue(value)
self.__digitalWrite__(channel, value)
return self.digitalRead(channel)
@response("%d")
def portWrite(self, value):
self.__portWrite__(value)
return self.portRead()
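# --- Editor's example (not part of the original driver set) -------------------
# A minimal in-memory port showing which hooks a concrete implementation must
# provide; it is illustrative only and does not talk to real hardware.
class DummyPort(GPIOPort):
    def __init__(self, channelCount=8):
        GPIOPort.__init__(self, channelCount)
        self.functions = [self.IN] * channelCount
        self.values = [self.LOW] * channelCount
    def __getFunction__(self, channel):
        return self.functions[channel]
    def __setFunction__(self, channel, func):
        self.functions[channel] = func
    def __digitalRead__(self, channel):
        return self.values[channel]
    def __digitalWrite__(self, channel, value):
        self.values[channel] = bool(value)
    def __portRead__(self):
        return sum(int(v) << i for i, v in enumerate(self.values))
    def __portWrite__(self, value):
        self.values = [bool((value >> i) & 1) for i in range(self.digitalChannelCount)]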
DRIVERS = {}
DRIVERS["helper"] = ["DigitalSensor", "DigitalActuator", "LightSwitch", "MotorSwitch", "RelaySwitch", "ValveSwitch", "MotionSensor"]
DRIVERS["pcf8574" ] = ["PCF8574", "PCF8574A"]
DRIVERS["ds2408" ] = ["DS2408"]
| mit | -4,353,876,563,491,724,300 | 29.97037 | 132 | 0.60177 | false |
palful/yambopy | yambopy/dbs/wfdb.py | 1 | 3127 | from yambopy import *
import numpy as np
import shutil
import os
from netCDF4 import Dataset
def abs2(x):
return x.real**2 + x.imag**2
class YamboWFDB():
def __init__(self,savedb,path=None,save='SAVE',filename='ns.wf'):
"""
load wavefunction from yambo
"""
if path is None:
self.path = save
else:
self.path = path+'/SAVE'
self.filename = filename
#take some data from savedb
self.savedb = savedb
self.wfcgrid = savedb.wfcgrid
self.gvectors = savedb.gvectors
self.kpoints = savedb.kpts_car
self.lat = savedb.lat
self.rlat = savedb.rlat
#read wf
self.read()
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def read(self):
path = self.path
filename = self.filename
wf = []
nk = 1
while True:
try:
fname = "%s/%s_fragments_%d_1"%(path,filename,nk)
database = Dataset(fname)
re = database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
im = database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
a = re+1j*im
wf.append(a)
nk+=1
except:
if nk==1:
raise IOError('Could not read %s'%fname)
break
self.wf = np.array(wf)
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def get_wf_gvecs(self,kpoint=0):
"""
Get the indexes of teh wavefunctions
"""
#create array for fft
indexes = self.wfcgrid[kpoint]
        indexes = indexes[indexes > 0] #remove components that do not belong
gvecs = self.gvectors[indexes]
return gvecs
def write(self,path):
"""
write the wavefunctions in new files
"""
if os.path.isdir(path): shutil.rmtree(path)
os.mkdir(path)
#copy all the files
oldpath = self.path
filename = self.filename
shutil.copyfile("%s/%s"%(oldpath,filename),"%s/%s"%(path,filename))
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
shutil.copyfile("%s/%s"%(oldpath,fname),"%s/%s"%(path,fname))
#edit with the new wfs
wf = self.wf
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
database = Dataset("%s/%s"%(path,fname),'r+')
database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].real
database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].imag
            database.close()
print 'new wavefunctions written in %s'%path
def __str__(self):
s = ""
s += "nkpoints: %4d\n"%self.nkpoints
s += "nspin: %4d\n"%self.nspin
s += "nbands: %4d\n"%self.nbands
s += "ng: %4d\n"%self.ng
return s
if __name__ == "__main__":
ywf = YamboWFDB(path='database')
| bsd-3-clause | -842,594,899,618,820,100 | 29.656863 | 93 | 0.521906 | false |
josdaza/deep-toolbox | PyTorch/Seq2Seq/evaluator.py | 1 | 2200 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
use_cuda = torch.cuda.is_available()
def evaluate(config_params, encoder, decoder, sentence):
SOS_TOKEN = int(config_params["RNN"]["sos_token"])
EOS_TOKEN = int(config_params["RNN"]["eos_token"])
max_length = int(config_params["Main"]["max_length"])
input_variable = variableFromSentence(input_lang, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_TOKEN]])) # SOS
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_TOKEN:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(config_params, encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(config_params, encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
| mit | 5,671,654,910,903,290,000 | 37.596491 | 85 | 0.644545 | false |
fffonion/xeHentai | xeHentai/i18n/zh_hans.py | 1 | 7970 | # coding:utf-8
from ..const import *
err_msg = {
ERR_URL_NOT_RECOGNIZED: "网址不够绅士",
ERR_CANT_DOWNLOAD_EXH: "需要登录后才能下载里站",
ERR_ONLY_VISIBLE_EXH: "这个本子只有里站能看到",
ERR_MALFORMED_HATHDL: "hathdl文件有猫饼,解析失败",
ERR_GALLERY_REMOVED: "这个本子被移除了,大概里站能看到",
ERR_KEY_EXPIRED: "下载链接不太正常",
ERR_NO_PAGEURL_FOUND: "没有找到页面链接,网站改版了嘛?",
ERR_CONNECTION_ERROR: "连接有问题?",
ERR_IP_BANNED: "IP被ban了, 恢复时间: %s",
ERR_IMAGE_BROKEN: "下载的图片有猫饼",
ERR_QUOTA_EXCEEDED: "配额超限",
ERR_TASK_NOT_FOUND: "没有该GUID对应的任务",
ERR_TASK_LEVEL_UNDEF: "任务过滤等级不存在",
ERR_DELETE_RUNNING_TASK: "无法删除运行中的任务",
ERR_TASK_CANNOT_PAUSE: "这个任务无法被暂停",
ERR_TASK_CANNOT_RESUME: "这个任务无法被恢复",
ERR_CANNOT_CREATE_DIR: "无法创建文件夹 %s",
ERR_CANNOT_MAKE_ARCHIVE: "无法制作压缩包 %s",
ERR_NOT_RANGE_FORMAT: "'%s'不符合范围的格式, 正确的格式为 1-3 或者 5",
# ERR_HATHDL_NOTFOUND: "hathdl文件未找到"
ERR_RPC_PARSE_ERROR: "Parse error.",
ERR_RPC_INVALID_REQUEST: "Invalid request.",
ERR_RPC_METHOD_NOT_FOUND: "Method not found.",
ERR_RPC_INVALID_PARAMS: "Invalid method parameter(s).",
ERR_RPC_UNAUTHORIZED: "Unauthorized",
ERR_RPC_EXEC_ERROR: "",
ERR_SAVE_SESSION_FAILED: "",
}
ERR_NOMSG = "未指定的错误,错误号 %d"
XEH_OPT_DESC = "绅♂士下载器"
XEH_OPT_EPILOG = "如果参数未指定,则使用config.py中的默认值; " \
"讨论和反馈问题:https://yooooo.us/2013/xehentai"
XEH_OPT_URLS = "下载页的网址"
XEH_OPT_u = "用户名"
XEH_OPT_k = "密码"
XEH_OPT_c = "Cookie字符串,如果指定了用户名和密码,此项会被忽略"
XEH_OPT_o = "是否下载原始图片(如果存在),需要登录 (当前: %(default)s)"
XEH_OPT_t = "下载线程数 (当前: %(default)d)"
XEH_OPT_l = "保存日志的路径 (当前: %(default)s)"
XEH_OPT_p = "设置代理, 可以指定多次, 当前支持的类型: socks5/4a, http(s), glype. 代理默认只用于扫描网页 (当前: %(default)s)"
XEH_OPT_proxy_image = "同时使用代理来下载图片和扫描网页 (当前: %(default)s)"
XEH_OPT_proxy_image_only = "仅使用代理来下载图片, 不用于扫描网页 (当前: %(default)s)"
XEH_OPT_d = "设置下载目录 (当前: %(default)s)"
XEH_OPT_v = "设置日志装逼等级 (当前: %(default)s)"
XEH_OPT_i = "交互模式,如果开启后台模式,此项会被忽略 (当前: %(default)s)"
XEH_OPT_r = "将图片重命名为原始名称,如果关闭则使用序号 (当前: %(default)s)"
XEH_OPT_daemon = "后台模式 (当前: %(default)s)"
XEH_OPT_rpc_interface = "设置JSON-RPC监听IP (当前: %(default)s)"
XEH_OPT_rpc_port = "设置JSON-RPC监听端口 (当前: %(default)s)"
XEH_OPT_rpc_secret = "设置JSON-RPC密钥 (当前: %(default)s)"
XEH_OPT_rpc_open_browser = "RPC服务端启动后自动打开浏览器页面 (当前: %(default)s)"
XEH_OPT_a = "下载完成后生成zip压缩包并删除下载目录 (当前: %(default)s)"
XEH_OPT_delete_task_files = "删除任务时同时删除下载的文件 (当前: %(default)s)"
XEH_OPT_j = "使用日语标题, 如果关闭则使用英文或罗马字标题 (当前: %(default)s)"
XEH_OPT_download_range = "设置下载的图片范围, 格式为 开始位置-结束位置, 或者单张图片的位置, " \
"使用逗号来分隔多个范围, 例如 5-10,15,20-25, 默认为下载所有"
XEH_OPT_timeout = "设置下载图片的超时 (当前: %(default)s秒)"
XEH_OPT_low_speed = "设置最低下载速度,低于此值将换源重新下载 (当前: %(default)s KB/s)"
XEH_OPT_f = "忽略配额判断,继续下载 (当前: %(current)s)"
XEH_OPT_h = "显示本帮助信息"
XEH_OPT_version = "显示版本信息"
XEH_OPT_IGNORING_I = "后台模式已忽略 -i 参数"
XEH_OPT_auto_update = "检查并自动下载更新"
XEH_OPT_update_beta_channel = "是否更新到测试分支"
PS_LOGIN = "当前没有登陆,要登陆吗 (y/n)? > "
PS_USERNAME = "输入用户名 > "
PS_PASSWD = "输入密码 > "
PS_URL = "输入地址(使用,分割下载多个)> "
PS_PROXY = "输入代理地址 (可选) > "
PS_DOWNLOAD_ORI = "是否下载原图(默认%s) (y/n)? > "
PS_RENAME_ORI = "是否自动重命名(默认%s) (y/n)? > "
PS_MAKE_ARCHIVE = "是否制作zip压缩包(默认%s) (y/n)? > "
PS_JPN_TITLE = "是否使用日语标题(默认%s) (y/n)? > "
PS_DOWNLOAD_RANGE = "下载范围, 使用逗号分割多个范围, 回车下载全部 > "
PS_DOWNLOAD_DIR = "下载目录 (当前: %s)\n回车确认或输入新路径 > "
PROXY_CANDIDATE_CNT = "代理池中有%d个代理"
TASK_PUT_INTO_WAIT = "任务 #%s 已存在, 加入等待队列"
TASK_ERROR = "任务 #%s 发生错误: %s"
TASK_MIGRATE_EXH = "任务 #%s 使用里站地址重新下载"
TASK_TITLE = "任务 #%s 标题 %s"
TASK_WILL_DOWNLOAD_CNT = "任务 #%s 将下载%d个文件,共%d个 "
TASK_START = "任务 #%s 开始"
TASK_FINISHED = "任务 #%s 下载完成"
TASK_START_PAGE_RESCAN = "任务 #%s 图片被缩放,进行完整扫描"
# TASK_FAST_SCAN = "任务 #%s 使用快速扫描"
TASK_START_MAKE_ARCHIVE = "任务 #%s 开始打包"
TASK_MAKE_ARCHIVE_FINISHED = "任务 #%s 打包完成,保存在: %s, 用时%.1f秒"
TASK_STOP_QUOTA_EXCEEDED = "任务 #%s 配额超限"
TASK_STUCK = "任务 #%s 卡住了, 可能是脚本有bug, 或者网络连接太慢了"
TASK_SLOW = "任务 #%s 有点慢, 可能是图片太大了,或者网络连接太慢了; 可以考虑使用代理"
TASK_UNFINISHED = "任务 #%s 剩余以下图片未下载: %s"
XEH_STARTED = "xeHentai %s 已启动"
XEH_LOOP_FINISHED = "程序循环已完成"
XEH_LOGIN_EXHENTAI = "登录绅士"
XEH_LOGIN_OK = "已成为绅士"
XEH_LOGIN_FAILED = "无法登录绅士;检查输入是否有误或者换一个帐号。\n推荐在浏览器登录后使用RPC复制cookie到xeHentai (教程: http://t.cn/Rctr4Pf)"
XEH_LOAD_TASKS_CNT = "从存档中读取了%d个任务"
XEH_LOAD_OLD_COOKIE = "从1.x版cookie文件从读取了登录信息"
XEH_DAEMON_START = "后台进程已启动,PID为%d"
XEH_PLATFORM_NO_DAEMON = "后台模式不支持您的系统: %s"
XEH_CLEANUP = "擦干净..."
XEH_CRITICAL_ERROR = "xeHentai 抽风啦:\n%s"
XEH_DOWNLOAD_ORI_NEED_LOGIN = "下载原图需要登录"
XEH_FILE_DOWNLOADED = "绅士-{} 已下载图片 #{} {}"
XEH_RENAME_HAS_ERRORS = "部分图片重命名失败:\n%s"
XEH_DOWNLOAD_HAS_ERROR = "绅士-%s 下载图片 #%s 时出错: %s, 将在稍后重试"
XEH_SCAN_FAILED = "%s 扫描页面 %s 失败: %d"
RPC_STARTED = "RPC服务器监听在 %s:%d"
RPC_TOO_OPEN = "RPC服务器监听在公网IP (%s),为了安全起见应该设置rpc_secret"
RPC_CANNOT_BIND = "RPC服务器无法启动:%s"
RPC_WEBUI_PATH = "WebUI 地址为 %s 或者 https://xehentai.yooooo.us"
SESSION_LOAD_EXCEPTION = "读取存档时遇到错误: %s"
SESSION_WRITE_EXCEPTION = "写入存档时遇到错误: %s"
THREAD = "绅士"
THREAD_UNCAUGHT_EXCEPTION = "绅士-%s 未捕获的异常\n%s"
THREAD_MAY_BECOME_ZOMBIE = "绅士-%s 可能变成了丧尸"
THREAD_SWEEP_OUT = "绅士-%s 挂了, 不再理它"
THREAD_SPEED_TOO_LOW = "绅士-%s 下载速度只有 %s/s, 低于 %s/s, 将在稍后重试"
QUEUE = "队列"
PROXY_DISABLE_BANNED = "禁用了一个被ban的代理,将在约%s秒后恢复"
UPDATE_CHANNEL = "更新渠道为: %s"
UPDATE_DEV_CHANNEL = "测试版"
UPDATE_RELEASE_CHANNEL = "正式版"
UPDATE_FAILED = "更新时遇到错误: %s"
UPDATE_COMPLETE = "更新完成,请重新启动程序应用更新"
UPDATE_NO_UPDATE = "没有可用更新"
UPDATE_AVAILABLE = "发现可用的更新: 发布于 %s \"%s\" (%s)"
UPDATE_DOWNLOAD_MANUALLY = "可以从 https://dl.yooooo.us/share/xeHentai/ 下载更新"
| gpl-3.0 | -7,945,369,684,443,056,000 | 36.082192 | 103 | 0.668452 | false |
collective/fourdigits.portlet.keywordrelated | bootstrap.py | 1 | 10500 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = map(quote, args)
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site # imported because of its side effects
sys.path[:] = clean_path
for k, v in sys.modules.items():
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__) == 1 and
not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute", default=False,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.insert(0, 'buildout:accept-buildout-test-releases=true')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n', '\n')
ez = {}
exec ez_code in ez
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.use_distribute:
setup_args['no_fake'] = True
if sys.version_info[:2] == (2, 4):
setup_args['version'] = '0.6.32'
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
reload(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if not find_links and options.accept_buildout_test_releases:
find_links = 'http://downloads.buildout.org/'
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if distv >= pkg_resources.parse_version('2dev'):
continue
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement += '=='+version
else:
requirement += '<2dev'
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print ("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
# If there isn't already a command in the args, add bootstrap
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir) | gpl-2.0 | -5,138,904,244,975,088,000 | 36.909747 | 78 | 0.626476 | false |
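# Example invocations (editor's addition; the version number is illustrative):
#   python bootstrap.py
#   python bootstrap.py -c devel.cfg --version=1.7.1
#   python bootstrap.py --distribute --eggs=./eggs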
jath03/projects | local/Python server-client/MyServer.py | 1 | 2622 | from http.server import BaseHTTPRequestHandler, HTTPServer
import threading, pickle, re, subprocess
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
f_type_map = {'.html': 'text/html', '.css': 'text/css', '.ico': 'image/x-icon', '.jpg': 'image/jpeg', '.png': 'image/png', '.gif': 'image/gif', '.js': 'text/javascript', '.py': 'test/python'}
t_type = re.compile('\/|(\.\w*)')
r_file = self.path.split('?')
requested_type = t_type.findall(self.path)
print(requested_type)
ex = requested_type[-1]
if ex != '.py':
try:
self.send_response(200)
self.send_header('Content-type', f_type_map[ex])
self.send_header('Content-Encoding', 'utf-8')
self.end_headers()
try:
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google%s'% r_file[0]) as file:
f = file.read()
#f = 'This is my secret message'
self.wfile.write(bytes(f, 'utf8'))
except UnicodeDecodeError:
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google%s'% r_file[0], 'rb') as f:
file = f.read()
self.wfile.write(file)
except IOError:
self.send_response(404, 'File Not Found')
self.wfile.write(bytes('404 file not found', 'utf8'))
except KeyError:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
with open('C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Google test signin\Google\index.html') as file:
f = 'This is my secret message'
#f = file.read()
self.wfile.write(bytes(f, 'utf8'))
return
        else:
            # Run the CGI script and return its output; the command must be split into
            # [interpreter, script] and the CompletedProcess's stdout (already bytes) written out.
            result = subprocess.run(['python3', 'C:\\Users\jackt.JACK-IS-AWESOME\OneDrive\LaptopProjects\Python server-client\cgi\firstcgi.py'], stdout=subprocess.PIPE)
            self.wfile.write(result.stdout)
server_address = ('192.168.1.233', 6789)
def run():
print('starting server ...')
httpd = HTTPServer(server_address, MyHandler)
httpd.serve_forever()
bg_server= threading.Thread(target = run)
###Comment out the next line if you do not want the server to start when the file is run###
bg_server.start()
print('\nserver started at %s:%s'% server_address)
| mit | 8,561,099,485,359,479,000 | 47.555556 | 199 | 0.553776 | false |
alirizakeles/zato | code/zato-web-admin/src/zato/admin/web/views/channel/amqp.py | 1 | 3812 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
from traceback import format_exc
# Django
from django.http import HttpResponse, HttpResponseServerError
# anyjson
from anyjson import dumps
# Zato
from zato.admin.web.forms.channel.amqp import CreateForm, EditForm
from zato.admin.web.views import Delete as _Delete, get_definition_list, \
Index as _Index, method_allowed
from zato.common.odb.model import ChannelAMQP
logger = logging.getLogger(__name__)
def _get_edit_create_message(params, prefix=''):
""" Creates a base dictionary which can be used by both 'edit' and 'create' actions.
"""
return {
'id': params.get('id'),
'cluster_id': params['cluster_id'],
'name': params[prefix + 'name'],
'is_active': bool(params.get(prefix + 'is_active')),
'def_id': params[prefix + 'def_id'],
'queue': params[prefix + 'queue'],
'consumer_tag_prefix': params[prefix + 'consumer_tag_prefix'],
'service': params[prefix + 'service'],
'data_format': params.get(prefix + 'data_format'),
}
def _edit_create_response(client, verb, id, name, def_id, cluster_id):
response = client.invoke('zato.definition.amqp.get-by-id', {'id':def_id, 'cluster_id':cluster_id})
return_data = {'id': id,
'message': 'Successfully {0} the AMQP channel [{1}]'.format(verb, name),
'def_name': response.data.name
}
return HttpResponse(dumps(return_data), content_type='application/javascript')
class Index(_Index):
method_allowed = 'GET'
url_name = 'channel-amqp'
template = 'zato/channel/amqp.html'
service_name = 'zato.channel.amqp.get-list'
output_class = ChannelAMQP
paginate = True
class SimpleIO(_Index.SimpleIO):
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'queue', 'consumer_tag_prefix',
'def_name', 'def_id', 'service_name', 'data_format')
output_repeated = True
def handle(self):
create_form = CreateForm(req=self.req)
edit_form = EditForm(prefix='edit', req=self.req)
if self.req.zato.cluster_id:
def_ids = get_definition_list(self.req.zato.client, self.req.zato.cluster, 'amqp')
create_form.set_def_id(def_ids)
edit_form.set_def_id(def_ids)
return {
'create_form': create_form,
'edit_form': edit_form,
}
@method_allowed('POST')
def create(req):
try:
response = req.zato.client.invoke('zato.channel.amqp.create', _get_edit_create_message(req.POST))
return _edit_create_response(req.zato.client, 'created', response.data.id,
req.POST['name'], req.POST['def_id'], req.POST['cluster_id'])
except Exception, e:
msg = 'Could not create an AMQP channel, e:[{e}]'.format(e=format_exc(e))
logger.error(msg)
return HttpResponseServerError(msg)
@method_allowed('POST')
def edit(req):
try:
req.zato.client.invoke('zato.channel.amqp.edit', _get_edit_create_message(req.POST, 'edit-'))
return _edit_create_response(req.zato.client, 'updated', req.POST['id'], req.POST['edit-name'],
req.POST['edit-def_id'], req.POST['cluster_id'])
except Exception, e:
msg = 'Could not update the AMQP channel, e:[{e}]'.format(e=format_exc(e))
logger.error(msg)
return HttpResponseServerError(msg)
class Delete(_Delete):
url_name = 'channel-amqp-delete'
error_message = 'Could not delete the AMQP channel'
service_name = 'zato.channel.amqp.delete'
| gpl-3.0 | 1,374,120,287,783,542,800 | 34.626168 | 105 | 0.633788 | false |
brunobord/critica | apps/archives/views.py | 1 | 2991 | # -*- coding: utf-8 -*-
"""
Views of ``critica.apps.archives`` application.
"""
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from django.core.paginator import InvalidPage
from django.core.paginator import EmptyPage
from critica.apps.front.views import home
from critica.apps.front.views import category
from critica.apps.front.views import regions
from critica.apps.front.views import voyages
from critica.apps.front.views import epicurien
from critica.apps.front.views import anger
from critica.apps.issues.models import Issue
from critica.apps.issues.views import _get_current_issue
def archives(request):
"""
Displays archive list.
"""
issue = _get_current_issue()
context = {}
context['issue'] = issue
context['is_current'] = True
try:
item_list = Issue.objects.filter(is_published=True).order_by('-publication_date')
except ObjectDoesNotExist:
item_list = None
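    # Paginate 30 issues per page; fall back to page 1 on a malformed page
    # number and to the last page when the requested page is out of range.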
paginator = Paginator(item_list, 30)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
context['items'] = paginator.page(page)
except (EmptyPage, InvalidPage):
context['items'] = paginator.page(paginator.num_pages)
return render_to_response(
'archives/archives.html',
context,
context_instance=RequestContext(request)
)
def issuearchive_home(request, issue_number):
"""
Displays home archive of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return home(request, issue=issue, is_archive=True)
def issuearchive_category(request, issue_number, category_slug):
"""
Displays category archive of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return category(request, category_slug, issue=issue, is_archive=True)
def issuearchive_regions(request, issue_number):
"""
Displays "Regions" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return regions(request, issue=issue, is_archive=True)
def issuearchive_voyages(request, issue_number):
"""
Displays "Voyages" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return voyages(request, issue=issue, is_archive=True)
def issuearchive_epicurien(request, issue_number):
"""
Displays "Epicurien" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return epicurien(request, issue=issue, is_archive=True)
def issuearchive_anger(request, issue_number):
"""
Displays "Anger" category of a given issue.
"""
issue = _get_current_issue(issue_number=issue_number)
return anger(request, issue=issue, is_archive=True)
| gpl-3.0 | 3,696,300,011,208,231,400 | 25.945946 | 89 | 0.684721 | false |
aequitas/home-assistant | homeassistant/components/zwave/__init__.py | 1 | 43527 | """Support for Z-Wave."""
import asyncio
import copy
from importlib import import_module
import logging
from pprint import pprint
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback, CoreState
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.const import (
ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.event import async_track_time_change
from homeassistant.util import convert
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from . import const
from . import config_flow # noqa pylint: disable=unused-import
from .const import (
CONF_AUTOHEAL, CONF_DEBUG, CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH, CONF_CONFIG_PATH, CONF_NETWORK_KEY,
DEFAULT_CONF_AUTOHEAL, DEFAULT_CONF_USB_STICK_PATH,
DEFAULT_POLLING_INTERVAL, DEFAULT_DEBUG, DOMAIN,
DATA_DEVICES, DATA_NETWORK, DATA_ENTITY_VALUES)
from .node_entity import ZWaveBaseEntity, ZWaveNodeEntity
from . import workaround
from .discovery_schemas import DISCOVERY_SCHEMAS
from .util import (check_node_schema, check_value_schema, node_name,
check_has_unique_id, is_node_parsed)
_LOGGER = logging.getLogger(__name__)
CLASS_ID = 'class_id'
ATTR_POWER = 'power_consumption'
CONF_POLLING_INTENSITY = 'polling_intensity'
CONF_IGNORED = 'ignored'
CONF_INVERT_OPENCLOSE_BUTTONS = 'invert_openclose_buttons'
CONF_REFRESH_VALUE = 'refresh_value'
CONF_REFRESH_DELAY = 'delay'
CONF_DEVICE_CONFIG = 'device_config'
CONF_DEVICE_CONFIG_GLOB = 'device_config_glob'
CONF_DEVICE_CONFIG_DOMAIN = 'device_config_domain'
DATA_ZWAVE_CONFIG = 'zwave_config'
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 5
SUPPORTED_PLATFORMS = ['binary_sensor', 'climate', 'cover', 'fan',
'lock', 'light', 'sensor', 'switch']
RENAME_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
RENAME_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_NAME): cv.string,
})
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
vol.Optional(const.ATTR_CONFIG_SIZE, default=2): vol.Coerce(int)
})
SET_NODE_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Coerce(int)
})
REFRESH_NODE_VALUE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int)
})
SET_POLL_INTENSITY_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_POLL_INTENSITY): vol.Coerce(int),
})
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
})
NODE_SERVICE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
})
REFRESH_ENTITY_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
})
RESET_NODE_METERS_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=1): vol.Coerce(int)
})
CHANGE_ASSOCIATION_SCHEMA = vol.Schema({
vol.Required(const.ATTR_ASSOCIATION): cv.string,
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_GROUP): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int)
})
SET_WAKEUP_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE):
vol.All(vol.Coerce(int), cv.positive_int),
})
HEAL_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_RETURN_ROUTES, default=False): cv.boolean,
})
TEST_NODE_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_MESSAGES, default=1): cv.positive_int,
})
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({
vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
vol.Optional(CONF_INVERT_OPENCLOSE_BUTTONS,
default=DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS): cv.boolean,
vol.Optional(CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE):
cv.boolean,
vol.Optional(CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY):
cv.positive_int
})
SIGNAL_REFRESH_ENTITY_FORMAT = 'zwave_refresh_entity_{}'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
vol.Optional(CONF_CONFIG_PATH): cv.string,
vol.Optional(CONF_NETWORK_KEY):
vol.All(cv.string, vol.Match(r'(0x\w\w,\s?){15}0x\w\w')),
vol.Optional(CONF_DEVICE_CONFIG, default={}):
vol.Schema({cv.entity_id: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_GLOB, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEVICE_CONFIG_DOMAIN, default={}):
vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}),
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
vol.Optional(CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL):
cv.positive_int,
vol.Optional(CONF_USB_STICK_PATH): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not callable(getattr(obj, key))}
def _value_name(value):
"""Return the name of the value."""
return '{} {}'.format(node_name(value.node), value.label).strip()
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
_LOGGER.info("FOUND NODE %s \n"
"%s", node.product_name, node_dict)
def get_config_value(node, value_index, tries=5):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
if (value.command_class == const.COMMAND_CLASS_CONFIGURATION
and value.index == value_index):
return value.data
except RuntimeError:
# If we get a runtime error the dict has changed while
# we was looking for a value, just do it again
return None if tries <= 0 else get_config_value(
node, value_index, tries=tries - 1)
return None
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Z-Wave platform (generic part)."""
if discovery_info is None or DATA_NETWORK not in hass.data:
return False
device = hass.data[DATA_DEVICES].get(
discovery_info[const.DISCOVERY_DEVICE], None)
if device is None:
return False
async_add_entities([device])
return True
async def async_setup(hass, config):
"""Set up Z-Wave components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.data[DATA_ZWAVE_CONFIG] = conf
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={
CONF_USB_STICK_PATH: conf.get(
CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH),
CONF_NETWORK_KEY: conf.get(CONF_NETWORK_KEY),
}
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up Z-Wave from a config entry.
Will automatically load components to support devices found on the network.
"""
from pydispatch import dispatcher
# pylint: disable=import-error
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
from openzwave.group import ZWaveGroup
config = {}
if DATA_ZWAVE_CONFIG in hass.data:
config = hass.data[DATA_ZWAVE_CONFIG]
# Load configuration
use_debug = config.get(CONF_DEBUG, DEFAULT_DEBUG)
autoheal = config.get(CONF_AUTOHEAL,
DEFAULT_CONF_AUTOHEAL)
device_config = EntityValues(
config.get(CONF_DEVICE_CONFIG),
config.get(CONF_DEVICE_CONFIG_DOMAIN),
config.get(CONF_DEVICE_CONFIG_GLOB))
usb_path = config.get(
CONF_USB_STICK_PATH, config_entry.data[CONF_USB_STICK_PATH])
_LOGGER.info('Z-Wave USB path is %s', usb_path)
# Setup options
options = ZWaveOption(
usb_path,
user_path=hass.config.config_dir,
config_path=config.get(CONF_CONFIG_PATH))
options.set_console_output(use_debug)
if config_entry.data.get(CONF_NETWORK_KEY):
options.addOption("NetworkKey", config_entry.data[CONF_NETWORK_KEY])
await hass.async_add_executor_job(options.lock)
network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
hass.data[DATA_DEVICES] = {}
hass.data[DATA_ENTITY_VALUES] = []
if use_debug: # pragma: no cover
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED,
ZWaveNetwork.SIGNAL_SCENE_EVENT,
ZWaveNetwork.SIGNAL_NODE_EVENT,
ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED,
ZWaveNetwork
.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Handle new added value to a node on the network."""
# Check if this value should be tracked by an existing entity
for values in hass.data[DATA_ENTITY_VALUES]:
values.check_value(value)
for schema in DISCOVERY_SCHEMAS:
if not check_node_schema(node, schema):
continue
if not check_value_schema(
value,
schema[const.DISC_VALUES][const.DISC_PRIMARY]):
continue
values = ZWaveDeviceEntityValues(
hass, schema, value, config, device_config, registry)
# We create a new list and update the reference here so that
# the list can be safely iterated over in the main thread
new_values = hass.data[DATA_ENTITY_VALUES] + [values]
hass.data[DATA_ENTITY_VALUES] = new_values
component = EntityComponent(_LOGGER, DOMAIN, hass)
registry = await async_get_registry(hass)
def node_added(node):
"""Handle a new node on the network."""
entity = ZWaveNodeEntity(node, network)
def _add_node_to_component():
if hass.data[DATA_DEVICES].get(entity.unique_id):
return
name = node_name(node)
generated_id = generate_entity_id(DOMAIN + '.{}', name, [])
node_config = device_config.get(generated_id)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring node entity %s due to device settings",
generated_id)
return
hass.data[DATA_DEVICES][entity.unique_id] = entity
component.add_entities([entity])
if entity.unique_id:
_add_node_to_component()
return
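        # Nodes without a unique id yet are only added once check_has_unique_id
        # reports them ready (or times out); both callbacks below schedule
        # _add_node_to_component on the event loop.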
@callback
def _on_ready(sec):
_LOGGER.info("Z-Wave node %d ready after %d seconds",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave node %d not ready after %d seconds, "
"continuing anyway",
entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout,
hass.loop)
def node_removed(node):
node_id = node.node_id
node_key = 'node-{}'.format(node_id)
_LOGGER.info("Node Removed: %s",
hass.data[DATA_DEVICES][node_key])
for key in list(hass.data[DATA_DEVICES]):
if not key.startswith('{}-'.format(node_id)):
continue
entity = hass.data[DATA_DEVICES][key]
_LOGGER.info('Removing Entity - value: %s - entity_id: %s',
key, entity.entity_id)
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][key]
entity = hass.data[DATA_DEVICES][node_key]
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][node_key]
def network_ready():
"""Handle the query of all awake nodes."""
_LOGGER.info("Z-Wave network is ready for use. All awake nodes "
"have been queried. Sleeping nodes will be "
"queried when they awake.")
hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
"""Handle the querying of all nodes on network."""
_LOGGER.info("Z-Wave network is complete. All nodes on the network "
"have been queried")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
def network_complete_some_dead():
"""Handle the querying of all nodes on network."""
_LOGGER.info("Z-Wave network is complete. All nodes on the network "
"have been queried, but some nodes are marked dead")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE_SOME_DEAD)
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
dispatcher.connect(
node_removed, ZWaveNetwork.SIGNAL_NODE_REMOVED, weak=False)
dispatcher.connect(
network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete_some_dead,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD, weak=False)
def add_node(service):
"""Switch into inclusion mode."""
_LOGGER.info("Z-Wave add_node have been initialized")
network.controller.add_node()
def add_node_secure(service):
"""Switch into secure inclusion mode."""
_LOGGER.info("Z-Wave add_node_secure have been initialized")
network.controller.add_node(True)
def remove_node(service):
"""Switch into exclusion mode."""
_LOGGER.info("Z-Wave remove_node have been initialized")
network.controller.remove_node()
def cancel_command(service):
"""Cancel a running controller command."""
_LOGGER.info("Cancel running Z-Wave command")
network.controller.cancel_command()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("Z-Wave heal running")
network.heal()
def soft_reset(service):
"""Soft reset the controller."""
_LOGGER.info("Z-Wave soft_reset have been initialized")
network.controller.soft_reset()
def update_config(service):
"""Update the config from git."""
_LOGGER.info("Configuration update has been initialized")
network.controller.update_ozw_config()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
_LOGGER.info("Z-Wave test_network have been initialized")
network.test()
def stop_network(_service_or_event):
"""Stop Z-Wave network."""
_LOGGER.info("Stopping Z-Wave network")
network.stop()
if hass.state == CoreState.running:
hass.bus.fire(const.EVENT_NETWORK_STOP)
def rename_node(service):
"""Rename a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
name = service.data.get(const.ATTR_NAME)
node.name = name
_LOGGER.info(
"Renamed Z-Wave node %d to %s", node_id, name)
def rename_value(service):
"""Rename a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
name = service.data.get(const.ATTR_NAME)
value.label = name
_LOGGER.info(
"Renamed Z-Wave value (Node %d Value %d) to %s",
node_id, value_id, name)
def set_poll_intensity(service):
"""Set the polling intensity of a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
value = node.values[value_id]
intensity = service.data.get(const.ATTR_POLL_INTENSITY)
if intensity == 0:
if value.disable_poll():
_LOGGER.info("Polling disabled (Node %d Value %d)",
node_id, value_id)
return
_LOGGER.info("Polling disabled failed (Node %d Value %d)",
node_id, value_id)
else:
if value.enable_poll(intensity):
_LOGGER.info(
"Set polling intensity (Node %d Value %d) to %s",
node_id, value_id, intensity)
return
_LOGGER.info("Set polling intensity failed (Node %d Value %d)",
node_id, value_id)
def remove_failed_node(service):
"""Remove failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to remove zwave node %d", node_id)
network.controller.remove_failed_node(node_id)
def replace_failed_node(service):
"""Replace failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to replace zwave node %d", node_id)
network.controller.replace_failed_node(node_id)
def set_config_parameter(service):
"""Set a config parameter to a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
selection = service.data.get(const.ATTR_CONFIG_VALUE)
size = service.data.get(const.ATTR_CONFIG_SIZE)
for value in (
node.get_values(class_id=const.COMMAND_CLASS_CONFIGURATION)
.values()):
if value.index != param:
continue
if value.type == const.TYPE_BOOL:
value.data = int(selection == 'True')
_LOGGER.info("Setting config parameter %s on Node %s "
"with bool selection %s", param, node_id,
str(selection))
return
if value.type == const.TYPE_LIST:
value.data = str(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with list selection %s", param, node_id,
str(selection))
return
if value.type == const.TYPE_BUTTON:
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Setting config parameter %s on Node %s "
"with button selection %s", param, node_id,
selection)
return
value.data = int(selection)
_LOGGER.info("Setting config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
return
node.set_config_param(param, selection, size)
_LOGGER.info("Setting unknown config parameter %s on Node %s "
"with selection %s", param, node_id,
selection)
def refresh_node_value(service):
"""Refresh the specified value from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id]
node.values[value_id].refresh()
_LOGGER.info("Node %s value %s refreshed", node_id, value_id)
def set_node_value(service):
"""Set the specified value on a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
value = service.data.get(const.ATTR_CONFIG_VALUE)
node = network.nodes[node_id]
node.values[value_id].data = value
_LOGGER.info("Node %s value %s set to %s", node_id, value_id, value)
def print_config_parameter(service):
"""Print a config parameter from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
_LOGGER.info("Config parameter %s on Node %s: %s",
param, node_id, get_config_value(node, param))
def print_node(service):
"""Print all information about z-wave node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
nice_print_node(node)
def set_wakeup(service):
"""Set wake-up interval of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
value = service.data.get(const.ATTR_CONFIG_VALUE)
if node.can_wake_up():
for value_id in node.get_values(
class_id=const.COMMAND_CLASS_WAKE_UP):
node.values[value_id].data = value
_LOGGER.info("Node %s wake-up set to %d", node_id, value)
else:
_LOGGER.info("Node %s is not wakeable", node_id)
def change_association(service):
"""Change an association in the zwave network."""
association_type = service.data.get(const.ATTR_ASSOCIATION)
node_id = service.data.get(const.ATTR_NODE_ID)
target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
group = service.data.get(const.ATTR_GROUP)
instance = service.data.get(const.ATTR_INSTANCE)
node = ZWaveGroup(group, network, node_id)
if association_type == 'add':
node.add_association(target_node_id, instance)
_LOGGER.info("Adding association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
if association_type == 'remove':
node.remove_association(target_node_id, instance)
_LOGGER.info("Removing association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
async def async_refresh_entity(service):
"""Refresh values that specific entity depends on."""
entity_id = service.data.get(ATTR_ENTITY_ID)
async_dispatcher_send(
hass, SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id))
def refresh_node(service):
"""Refresh all node info."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id]
node.refresh_info()
def reset_node_meters(service):
"""Reset meter counters of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
instance = service.data.get(const.ATTR_INSTANCE)
node = network.nodes[node_id]
for value in (
node.get_values(class_id=const.COMMAND_CLASS_METER)
.values()):
if value.index != const.INDEX_METER_RESET:
continue
if value.instance != instance:
continue
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info("Resetting meters on node %s instance %s....",
node_id, instance)
return
_LOGGER.info("Node %s on instance %s does not have resettable "
"meters.", node_id, instance)
def heal_node(service):
"""Heal a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
update_return_routes = service.data.get(const.ATTR_RETURN_ROUTES)
node = network.nodes[node_id]
_LOGGER.info("Z-Wave node heal running for node %s", node_id)
node.heal(update_return_routes)
def test_node(service):
"""Send test messages to a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
messages = service.data.get(const.ATTR_MESSAGES)
node = network.nodes[node_id]
_LOGGER.info("Sending %s test-messages to node %s.", messages, node_id)
node.test(messages)
def start_zwave(_service_or_event):
"""Startup Z-Wave network."""
_LOGGER.info("Starting Z-Wave network...")
network.start()
hass.bus.fire(const.EVENT_NETWORK_START)
async def _check_awaked():
"""Wait for Z-wave awaked state (or timeout) and finalize start."""
_LOGGER.debug(
"network state: %d %s", network.state,
network.state_str)
start_time = dt_util.utcnow()
while True:
waited = int((dt_util.utcnow()-start_time).total_seconds())
if network.state >= network.STATE_AWAKED:
# Need to be in STATE_AWAKED before talking to nodes.
_LOGGER.info("Z-Wave ready after %d seconds", waited)
break
elif waited >= const.NETWORK_READY_WAIT_SECS:
# Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
# network to be ready.
_LOGGER.warning(
"Z-Wave not ready after %d seconds, continuing anyway",
waited)
_LOGGER.info(
"final network state: %d %s", network.state,
network.state_str)
break
else:
await asyncio.sleep(1)
hass.async_add_job(_finalize_start)
hass.add_job(_check_awaked)
def _finalize_start():
"""Perform final initializations after Z-Wave network is awaked."""
polling_interval = convert(
config.get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
network.set_poll_interval(polling_interval, False)
poll_interval = network.get_poll_interval()
_LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)
# Register node services for Z-Wave network
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE,
add_node_secure)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND,
cancel_command)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK,
heal_network)
hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, const.SERVICE_UPDATE_CONFIG,
update_config)
hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK,
test_network)
hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK,
stop_network)
hass.services.register(DOMAIN, const.SERVICE_RENAME_NODE, rename_node,
schema=RENAME_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RENAME_VALUE,
rename_value,
schema=RENAME_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER,
set_config_parameter,
schema=SET_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_NODE_VALUE,
set_node_value,
schema=SET_NODE_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE_VALUE,
refresh_node_value,
schema=REFRESH_NODE_VALUE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_CONFIG_PARAMETER,
print_config_parameter,
schema=PRINT_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_FAILED_NODE,
remove_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REPLACE_FAILED_NODE,
replace_failed_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_CHANGE_ASSOCIATION,
change_association,
schema=CHANGE_ASSOCIATION_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_WAKEUP,
set_wakeup,
schema=SET_WAKEUP_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_NODE,
print_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_ENTITY,
async_refresh_entity,
schema=REFRESH_ENTITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_REFRESH_NODE,
refresh_node,
schema=NODE_SERVICE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_RESET_NODE_METERS,
reset_node_meters,
schema=RESET_NODE_METERS_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_POLL_INTENSITY,
set_poll_intensity,
schema=SET_POLL_INTENSITY_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NODE,
heal_node,
schema=HEAL_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_TEST_NODE,
test_node,
schema=TEST_NODE_SCHEMA)
# Setup autoheal
if autoheal:
_LOGGER.info("Z-Wave network autoheal is enabled")
async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
hass.services.async_register(DOMAIN, const.SERVICE_START_NETWORK,
start_zwave)
for entry_component in SUPPORTED_PLATFORMS:
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
config_entry, entry_component))
return True
class ZWaveDeviceEntityValues():
"""Manages entity access to the underlying zwave value objects."""
def __init__(self, hass, schema, primary_value, zwave_config,
device_config, registry):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._zwave_config = zwave_config
self._device_config = device_config
self._schema = copy.deepcopy(schema)
self._values = {}
self._entity = None
self._workaround_ignore = False
self._registry = registry
for name in self._schema[const.DISC_VALUES].keys():
self._values[name] = None
self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = \
[primary_value.instance]
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
# Check values that have already been discovered for node
for value in self._node.values.values():
self.check_value(value)
self._check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values[name]
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
if not check_node_schema(value.node, self._schema):
return
for name in self._values:
if self._values[name] is not None:
continue
if not check_value_schema(
value, self._schema[const.DISC_VALUES][name]):
continue
self._values[name] = value
if self._entity:
self._entity.value_added()
self._entity.value_changed()
self._check_entity_ready()
def _check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
if self._workaround_ignore:
return
if self._entity is not None:
return
for name in self._schema[const.DISC_VALUES]:
if self._values[name] is None and \
not self._schema[const.DISC_VALUES][name].get(
const.DISC_OPTIONAL):
return
component = self._schema[const.DISC_COMPONENT]
workaround_component = workaround.get_device_component_mapping(
self.primary)
if workaround_component and workaround_component != component:
if workaround_component == workaround.WORKAROUND_IGNORE:
_LOGGER.info("Ignoring Node %d Value %d due to workaround.",
self.primary.node.node_id, self.primary.value_id)
# No entity will be created for this value
self._workaround_ignore = True
return
_LOGGER.debug("Using %s instead of %s",
workaround_component, component)
component = workaround_component
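        # Reuse an entity_id already recorded in the entity registry for this
        # value if there is one; otherwise generate a fresh id from the value
        # name.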
entity_id = self._registry.async_get_entity_id(
component, DOMAIN,
compute_value_unique_id(self._node, self.primary))
if entity_id is None:
value_name = _value_name(self.primary)
entity_id = generate_entity_id(component + '.{}', value_name, [])
node_config = self._device_config.get(entity_id)
# Configure node
_LOGGER.debug("Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Value type=%s, "
"Genre=%s as %s", self._node.node_id,
self._node.generic, self._node.specific,
self.primary.command_class, self.primary.type,
self.primary.genre, component)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring entity %s due to device settings", entity_id)
# No entity will be created for this value
self._workaround_ignore = True
return
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
self.primary.enable_poll(polling_intensity)
platform = import_module('.{}'.format(component),
__name__)
device = platform.get_device(
node=self._node, values=self,
node_config=node_config, hass=self._hass)
if device is None:
# No entity will be created for this value
self._workaround_ignore = True
return
self._entity = device
@callback
def _on_ready(sec):
_LOGGER.info(
"Z-Wave entity %s (node_id: %d) ready after %d seconds",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave entity %s (node_id: %d) not ready after %d seconds, "
"continuing anyway",
device.name, self._node.node_id, sec)
self._hass.async_add_job(discover_device, component, device)
async def discover_device(component, device):
"""Put device in a dictionary and call discovery on it."""
if self._hass.data[DATA_DEVICES].get(device.unique_id):
return
self._hass.data[DATA_DEVICES][device.unique_id] = device
if component in SUPPORTED_PLATFORMS:
async_dispatcher_send(
self._hass, 'zwave_new_{}'.format(component), device)
else:
await discovery.async_load_platform(
self._hass, component, DOMAIN,
{const.DISCOVERY_DEVICE: device.unique_id},
self._zwave_config)
if device.unique_id:
self._hass.add_job(discover_device, component, device)
else:
self._hass.add_job(check_has_unique_id, device, _on_ready,
_on_timeout, self._hass.loop)
class ZWaveDeviceEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node entity."""
def __init__(self, values, domain):
"""Initialize the z-Wave device."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self.values = values
self.node = values.primary.node
self.values.primary.set_change_verified(False)
self._name = _value_name(self.values.primary)
self._unique_id = self._compute_unique_id()
self._update_attributes()
dispatcher.connect(
self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def network_value_changed(self, value):
"""Handle a value change on the network."""
if value.value_id in [v.value_id for v in self.values if v]:
return self.value_changed()
def value_added(self):
"""Handle a new value of this entity."""
pass
def value_changed(self):
"""Handle a changed value for this entity's node."""
self._update_attributes()
self.update_properties()
self.maybe_schedule_update()
async def async_added_to_hass(self):
"""Add device to dict."""
async_dispatcher_connect(
self.hass,
SIGNAL_REFRESH_ENTITY_FORMAT.format(self.entity_id),
self.refresh_from_network)
def _update_attributes(self):
"""Update the node attributes. May only be used inside callback."""
self.node_id = self.node.node_id
self._name = _value_name(self.values.primary)
if not self._unique_id:
self._unique_id = self._compute_unique_id()
if self._unique_id:
self.try_remove_and_add()
if self.values.power:
self.power_consumption = round(
self.values.power.data, self.values.power.precision)
else:
self.power_consumption = None
def update_properties(self):
"""Update on data changes for node values."""
pass
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {
'identifiers': {
(DOMAIN, self.node_id)
},
'manufacturer': self.node.manufacturer_name,
'model': self.node.product_name,
'name': node_name(self.node),
}
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
const.ATTR_NODE_ID: self.node_id,
const.ATTR_VALUE_INDEX: self.values.primary.index,
const.ATTR_VALUE_INSTANCE: self.values.primary.instance,
const.ATTR_VALUE_ID: str(self.values.primary.value_id),
}
if self.power_consumption is not None:
attrs[ATTR_POWER] = self.power_consumption
return attrs
def refresh_from_network(self):
"""Refresh all dependent values from zwave network."""
for value in self.values:
if value is not None:
self.node.refresh_value(value.value_id)
def _compute_unique_id(self):
if (is_node_parsed(self.node) and
self.values.primary.label != "Unknown") or \
self.node.is_ready:
return compute_value_unique_id(self.node, self.values.primary)
return None
def compute_value_unique_id(node, value):
"""Compute unique_id a value would get if it were to get one."""
return "{}-{}".format(node.node_id, value.object_id)
| apache-2.0 | -1,543,759,490,632,505,300 | 38.39095 | 79 | 0.59099 | false |
Luindil/Glassure | glassure/gui/widgets/control/transfer.py | 1 | 3814 | # -*- coding: utf-8 -*-
from ...qt import QtWidgets, QtCore
from ..custom import FlatButton, HorizontalLine, LabelAlignRight
class TransferFunctionWidget(QtWidgets.QWidget):
def __init__(self, *args):
super(TransferFunctionWidget, self).__init__(*args)
self.create_widgets()
self.create_layout()
self.style_widgets()
self.create_signals()
def create_widgets(self):
self.load_std_btn = FlatButton("Load Std")
self.load_std_bkg_btn = FlatButton("Load Std Bkg")
self.load_sample_btn = FlatButton("Load Sample")
self.load_sample_bkg_btn = FlatButton("Load Sample Bkg")
self.std_filename_lbl = LabelAlignRight('')
self.std_bkg_filename_lbl = LabelAlignRight("")
self.sample_filename_lbl = LabelAlignRight("")
self.sample_bkg_filename_lbl = LabelAlignRight("")
self.std_bkg_scaling_sb = QtWidgets.QDoubleSpinBox()
self.std_bkg_scaling_sb.setValue(1.0)
self.std_bkg_scaling_sb.setSingleStep(0.01)
self.sample_bkg_scaling_sb = QtWidgets.QDoubleSpinBox()
self.sample_bkg_scaling_sb.setValue(1.0)
self.sample_bkg_scaling_sb.setSingleStep(0.01)
self.smooth_sb = QtWidgets.QDoubleSpinBox()
self.smooth_sb.setValue(1.0)
self.smooth_sb.setSingleStep(0.1)
def create_layout(self):
self.main_layout = QtWidgets.QVBoxLayout()
self.activate_cb = QtWidgets.QCheckBox("activate")
self.main_layout.addWidget(self.activate_cb)
self.main_layout.addWidget(HorizontalLine())
self.transfer_layout = QtWidgets.QGridLayout()
self.transfer_layout.addWidget(self.load_sample_btn, 0, 0)
self.transfer_layout.addWidget(self.sample_filename_lbl, 0, 1)
self.transfer_layout.addWidget(self.load_sample_bkg_btn, 1, 0)
self.transfer_layout.addWidget(self.sample_bkg_filename_lbl, 1, 1)
self.transfer_layout.addWidget(self.load_std_btn, 2, 0)
self.transfer_layout.addWidget(self.std_filename_lbl, 2, 1)
self.transfer_layout.addWidget(self.load_std_bkg_btn, 3, 0)
self.transfer_layout.addWidget(self.std_bkg_filename_lbl, 3, 1)
self.scaling_gb = QtWidgets.QGroupBox("")
self.scaling_layout = QtWidgets.QGridLayout()
self.scaling_layout.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.Fixed), 0, 0)
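        # The expanding spacer in column 0 pushes the label/spin-box pairs to
        # the right-hand edge of the group box.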
self.scaling_layout.addWidget(LabelAlignRight("Sample bkg scaling:"), 0, 1)
self.scaling_layout.addWidget(self.sample_bkg_scaling_sb, 0, 2)
self.scaling_layout.addWidget(LabelAlignRight("Std bkg scaling:"), 1, 1)
self.scaling_layout.addWidget(self.std_bkg_scaling_sb, 1, 2)
self.scaling_layout.addWidget(LabelAlignRight("Smoothing:"), 2, 1)
self.scaling_layout.addWidget(self.smooth_sb, 2, 2)
self.scaling_gb.setLayout(self.scaling_layout)
self.transfer_layout.addWidget(self.scaling_gb, 4, 0, 1, 2)
self.main_layout.addLayout(self.transfer_layout)
self.setLayout(self.main_layout)
def style_widgets(self):
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(5)
self.transfer_layout.setContentsMargins(5, 5, 5, 5)
self.sample_bkg_scaling_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.std_bkg_scaling_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.smooth_sb.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.sample_bkg_scaling_sb.setMinimumWidth(75)
self.std_bkg_scaling_sb.setMinimumWidth(75)
self.smooth_sb.setMinimumWidth(75)
def create_signals(self):
pass
| mit | 7,296,712,362,812,249,000 | 41.377778 | 103 | 0.667016 | false |
Samsung/skia | bench/gen_bench_expectations.py | 2 | 5049 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generate bench_expectations file from a given set of bench data files. """
import argparse
import bench_util
import os
import re
import sys
# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.5 # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 2.0 # Ratio of range for lower bounds.
ERR_RATIO = 0.08 # Further widens the range by the ratio of average value.
ERR_UB = 1.0 # Adds an absolute upper error to cope with small benches.
ERR_LB = 1.5 # Adds an absolute lower error to cope with small benches.
# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
'simple_viewport_1000x1000_angle',
'simple_viewport_1000x1000_gpu',
'simple_viewport_1000x1000_scalar_1.100000',
'simple_viewport_1000x1000_scalar_1.100000_gpu',
]
# List of flaky entries that should be excluded. Each entry is defined by a list
# of 3 strings, corresponding to the substrings of [bench, config, builder] to
# search for. A bench expectations line is excluded when each of the 3 strings
# in the list is a substring of the corresponding element of the given line. For
# instance, ['desk_yahooanswers', 'gpu', 'Ubuntu'] will skip expectation entries
# of SKP benchs whose name contains 'desk_yahooanswers' on all gpu-related
# configs of all Ubuntu builders.
ENTRIES_TO_EXCLUDE = [
]
def compute_ranges(benches):
"""Given a list of bench numbers, calculate the alert range.
Args:
benches: a list of float bench values.
Returns:
a list of float [lower_bound, upper_bound].
"""
minimum = min(benches)
maximum = max(benches)
diff = maximum - minimum
avg = sum(benches) / len(benches)
return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_LB,
maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_UB]
def create_expectations_dict(revision_data_points, builder):
"""Convert list of bench data points into a dictionary of expectations data.
Args:
revision_data_points: a list of BenchDataPoint objects.
builder: string of the corresponding buildbot builder name.
Returns:
a dictionary of this form:
keys = tuple of (config, bench) strings.
values = list of float [expected, lower_bound, upper_bound] for the key.
"""
bench_dict = {}
for point in revision_data_points:
if (point.time_type or # Not walltime which has time_type ''
not point.config in CONFIGS_TO_INCLUDE):
continue
to_skip = False
for bench_substr, config_substr, builder_substr in ENTRIES_TO_EXCLUDE:
if (bench_substr in point.bench and config_substr in point.config and
builder_substr in builder):
to_skip = True
break
if to_skip:
continue
key = (point.config, point.bench)
if key in bench_dict:
raise Exception('Duplicate bench entry: ' + str(key))
bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)
return bench_dict
def main():
"""Reads bench data points, then calculate and export expectations.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--representation_alg', default='25th',
help='bench representation algorithm to use, see bench_util.py.')
parser.add_argument(
'-b', '--builder', required=True,
help='name of the builder whose bench ranges we are computing.')
parser.add_argument(
'-d', '--input_dir', required=True,
help='a directory containing bench data files.')
parser.add_argument(
'-o', '--output_file', required=True,
help='file path and name for storing the output bench expectations.')
parser.add_argument(
'-r', '--git_revision', required=True,
help='the git hash to indicate the revision of input data to use.')
args = parser.parse_args()
builder = args.builder
data_points = bench_util.parse_skp_bench_data(
args.input_dir, args.git_revision, args.representation_alg)
expectations_dict = create_expectations_dict(data_points, builder)
out_lines = []
keys = expectations_dict.keys()
keys.sort()
for (config, bench) in keys:
(expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
'%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
'bench': bench,
'config': config,
'builder': builder,
'representation': args.representation_alg,
'expected': expected,
'lower_bound': lower_bound,
'upper_bound': upper_bound})
with open(args.output_file, 'w') as file_handle:
file_handle.write('\n'.join(out_lines))
if __name__ == "__main__":
main()
| bsd-3-clause | -3,046,671,847,493,979,000 | 35.323741 | 80 | 0.653001 | false |
alexgorban/models | official/recommendation/ncf_input_pipeline.py | 1 | 7065 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF model input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import data_pipeline
NUM_SHARDS = 16
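# Number of shards the input record files are split into; each shard is read
# and decoded in parallel via the interleave calls below.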
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True):
"""Creates dataset from (tf)records files for training/evaluation."""
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
def make_dataset(files_dataset, shard_index):
"""Returns dataset for sharded tf record files."""
if pre_batch_size != batch_size:
raise ValueError("Pre-batch ({}) size is not equal to batch "
"size ({})".format(pre_batch_size, batch_size))
files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
dataset = files_dataset.interleave(tf.data.TFRecordDataset)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
dataset = tf.data.Dataset.range(NUM_SHARDS)
map_fn = functools.partial(make_dataset, files)
dataset = dataset.interleave(
map_fn,
cycle_length=NUM_SHARDS,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_dataset_from_data_producer(producer, params):
"""Return dataset online-generating data."""
def preprocess_train_input(features, labels):
"""Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
"""
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
def preprocess_eval_input(features):
"""Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
"""
labels = tf.cast(tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
"""Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
input_meta_data: A dictionary of input metadata to be used when reading data
from tf record files. Must be specified when params["train_input_dataset"]
is specified.
strategy: Distribution strategy used for distributed training. If specified,
used to assert that evaluation batch size is correctly a multiple of
total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
ValueError: If data is being generated online for when using TPU's.
"""
# NCF evaluation metric calculation logic assumes that evaluation data
# sample size are in multiples of (1 + number of negative samples in
# evaluation) for each device. As so, evaluation batch size must be a
# multiple of (number of replicas * (1 + number of negative samples)).
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError("TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
# Start retrieving data from producer.
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| apache-2.0 | 7,662,879,708,559,198,000 | 37.396739 | 80 | 0.675442 | false |
protwis/protwis | construct/tool.py | 1 | 81937 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.db.models import Min, Count, Max
from django.conf import settings
from django.views.decorators.cache import cache_page
from django import forms
from construct.models import *
from structure.models import Structure
from protein.models import ProteinConformation, Protein, ProteinSegment, ProteinFamily
from alignment.models import AlignmentConsensus
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, STRUCTURAL_RULES, STRUCTURAL_SWITCHES
import json
from collections import OrderedDict
import re
import xlrd
import yaml
import os
import time
import pickle
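# Load the site-specific Alignment implementation (common.alignment_<SITE_NAME>)
# at import time so it can be used like a regular class in this module.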
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
class FileUploadForm(forms.Form):
file_source = forms.FileField()
def parse_excel(path):
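    # Returns a dict keyed by worksheet name; each worksheet maps the value of
    # its first column to a list of OrderedDicts of header -> cell value, e.g.
    # {'Sheet1': {'key1': [OrderedDict([('col_a', 'x'), ...])]}}.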
workbook = xlrd.open_workbook(path)
worksheets = workbook.sheet_names()
d = {}
for worksheet_name in worksheets:
if worksheet_name in d:
print('Error, worksheet with this name already loaded')
continue
d[worksheet_name] = {}
#d[worksheet_name] = OrderedDict()
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols
curr_row = 0 #skip first, otherwise -1
headers = []
for i in range(num_cells):
h = worksheet.cell_value(0, i)
if h=="":
h = "i_"+str(i)
if h in headers:
h += "_"+str(i)
            headers.append(h)
for curr_row in range(1,num_rows+1):
row = worksheet.row(curr_row)
key = worksheet.cell_value(curr_row, 0)
if key=='':
continue
if key not in d[worksheet_name]:
d[worksheet_name][key] = []
temprow = OrderedDict()
for curr_cell in range(num_cells):
cell_value = worksheet.cell_value(curr_row, curr_cell)
if headers[curr_cell] not in temprow:
temprow[headers[curr_cell]] = cell_value
d[worksheet_name][key].append(temprow)
return d
def compare_family_slug(a,b):
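    # Compares two protein family slugs (e.g. '001_001_002_001') level by level
    # and returns (shared_level, label); (-1, 'ignore') means the second family
    # should not be borrowed from for construct design purposes.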
a = a.split("_")
b = b.split("_")
if a[0]!=b[0]:
if a[0] == '001':
# class A doesnt borrow from other classes
return -1, 'ignore'
elif a[0] == '002':
# if Class B1 only use if B2
if b[0]!= '003':
return -1, 'ignore'
elif a[0] == '003':
            # if Class B2 only use if B1
if b[0]!= '002':
return -1, 'ignore'
elif a[0] == '004':
# if Class C ignore others
return -1, 'ignore'
elif a[0] == '006':
# if Class F ignore others
return -1, 'ignore'
elif a[0] == '007':
            # if Class Taste, only borrow from Class A
if b[0]!= '001':
return -1, 'ignore'
return 0,"Different Class"
elif a[1]!=b[1]:
return 1,"Class"
elif a[2]!=b[2]:
# return 2,"Ligand Type" Ignore Ligand Type level for construct Design purposes.
return 1,"Class"
elif a[3]!=b[3]:
return 3,"Receptor Family"
else:
return 4,"Receptor"
def new_tool(request):
simple_selection = request.session.get('selection', False)
if simple_selection == False or not simple_selection.targets:
return redirect("/construct/design")
proteins = []
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
context = {}
context['target'] = proteins[0]
level = proteins[0].family.slug
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
states = list(Structure.objects.filter(protein_conformation__protein__family__slug__startswith=level.split("_")[0]).all().values_list('state__slug', flat = True).distinct())
if 'active' in states:
active_xtals = True
else:
active_xtals = False
rs = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment','display_generic_number','generic_number')
residues = {}
residues_gn = {}
residues_pos = {}
for r in rs:
segment = r.protein_segment.slug
segment = segment.replace("-","")
if segment not in residues:
residues[segment] = []
residues[segment].append(r)
label = ''
if r.generic_number:
residues_gn[r.generic_number.label] = r
label = r.display_generic_number.label
residues_pos[r.sequence_number] = [r.amino_acid,r.protein_segment.slug,label]
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
inserts = {}
inserts['fusions'] = []
inserts['other'] = {}
for ins in ConstructInsertionType.objects.all().order_by('name','subtype'):
# print(ins.name,ins.subtype,ins.sequence)
if ins.name == 'fusion':
inserts['fusions'].append(ins.subtype)
else:
if ins.name not in inserts['other']:
inserts['other'][ins.name] = []
if ins.subtype not in inserts['other'][ins.name]:
inserts['other'][ins.name].append(ins.subtype)
# fusion, f_results = c.fusion()
# if fusion:
# f_protein = f_results[0][2]
# if f_protein not in inserts['fusions']:
# inserts['fusions'].append(f_protein)
# else:
# for ins in c.insertions.all():
# print(ins)
context['ICL_max'] = {'ICL2': residues['ICL2'][-1].sequence_number, 'ICL3': residues['ICL3'][-1].sequence_number}
context['ICL_min'] = {'ICL2': residues['ICL2'][0].sequence_number,'ICL3': residues['ICL3'][0].sequence_number}
context['residues'] = residues
context['residues_gn'] = residues_gn
context['residues_pos'] = residues_pos
context['class'] = c_level
context['active_xtals'] = active_xtals
context['inserts'] = inserts
context['form'] = FileUploadForm
context['signal_p'] = None
path_to_signal_p = os.sep.join([settings.BASE_DIR, "construct","signal_p.txt"])
with open(path_to_signal_p, "r", encoding='UTF-8') as signal_p:
for row in signal_p:
r = row.split()
if r[0]==proteins[0].entry_name:
context['signal_p'] = r[4]
print(row.split())
#print(residues)
return render(request,'new_tool.html',context)
def tool(request):
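# Older/simpler variant of the construct design view rendering tool.html; builds largely the same
# context as new_tool() but without the ICL2/ICL3 boundaries and the SignalP lookup.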
simple_selection = request.session.get('selection', False)
proteins = []
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
print(proteins)
context = {}
context['target'] = proteins[0]
level = proteins[0].family.slug
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
states = list(Structure.objects.filter(protein_conformation__protein__family__slug__startswith=level.split("_")[0]).all().values_list('state__slug', flat = True).distinct())
if 'active' in states:
active_xtals = True
else:
active_xtals = False
rs = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment','display_generic_number','generic_number')
residues = {}
residues_gn = {}
residues_pos = {}
for r in rs:
segment = r.protein_segment.slug
segment = segment.replace("-","")
if segment not in residues:
residues[segment] = []
residues[segment].append(r)
label = ''
if r.generic_number:
residues_gn[r.generic_number.label] = r
label = r.display_generic_number.label
residues_pos[r.sequence_number] = [r.amino_acid,r.protein_segment.slug,label]
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
inserts = {}
inserts['fusions'] = []
inserts['other'] = {}
for ins in ConstructInsertionType.objects.all().order_by('name','subtype'):
# print(ins.name,ins.subtype,ins.sequence)
if ins.name == 'fusion':
inserts['fusions'].append(ins.subtype)
else:
if ins.name not in inserts['other']:
inserts['other'][ins.name] = []
if ins.subtype not in inserts['other'][ins.name]:
inserts['other'][ins.name].append(ins.subtype)
# fusion, f_results = c.fusion()
# if fusion:
# f_protein = f_results[0][2]
# if f_protein not in inserts['fusions']:
# inserts['fusions'].append(f_protein)
# else:
# for ins in c.insertions.all():
# print(ins)
print(inserts)
context['residues'] = residues
context['residues_gn'] = residues_gn
context['residues_pos'] = residues_pos
context['class'] = c_level
context['active_xtals'] = active_xtals
context['inserts'] = inserts
context['form'] = FileUploadForm
#print(residues)
return render(request,'tool.html',context)
@cache_page(60 * 60 * 24 * 7)
def json_fusion(request, slug, **response_kwargs):
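# Appears to be a placeholder endpoint: despite querying constructs it currently just returns
# the JSON string "glyco".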
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
#proteins = Construct.objects.all().values_list('protein', flat = True)
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions')
jsondata = "glyco"
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_palmi(request, slug, **response_kwargs):
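# Suggest palmitoylation-site removals: scans helix 8 and the first ~10 residues of the C-terminus
# for cysteines and returns them as entries of the form
# [sequence_number, 'A', '', '', 'C', segment] under a single key.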
start_time = time.time()
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug,protein_segment__slug__in=['H8','C-term']).order_by('sequence_number').prefetch_related('protein_segment')
residues = {}
seq = ''
end_h8 = 0
start_h8 = 0
for r in rs:
if not start_h8 and r.protein_segment.slug == 'H8':
start_h8 = r.sequence_number
if not end_h8 and r.protein_segment.slug == 'C-term':
end_h8 = r.sequence_number-1 #end_h8 was prev residue
elif end_h8 and r.sequence_number-10>end_h8:
continue
seq += r.amino_acid
residues[r.sequence_number] = r.protein_segment.slug
# Cysteines in H8 / proximal C-terminus are potential palmitoylation sites
p = re.compile("C")
#print('all')
mutations_all = []
for m in p.finditer(seq):
mutations_all.append([m.start()+start_h8,"A",'','',m.group(),residues[m.start()+start_h8]])
palmi = OrderedDict()
palmi['']= mutations_all
jsondata = palmi
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("palmi",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_glyco(request, slug, **response_kwargs):
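# Suggest glycosylation-site removals: N-linked sites are N[^P][T/S] sequons in extracellular
# segments (returned as N->Q mutations); O-linked candidates are [T/S][T/S]...N motifs
# (returned as T->V / S->A mutations).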
start_time = time.time()
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment')
residues = {}
for r in rs:
residues[r.sequence_number] = r.protein_segment.slug
#No proline!
p = re.compile("N[^P][TS]")
#print('all')
mutations_all = []
matches = re.finditer(r'(?=([N][^P][TS]))',seq)
matches_seq = re.findall(r'(?=([N][^P][TS]))',seq)
#{"all": [[39, "Q", "", "", "NTS", "N-term"], [203, "Q", "", "", "NNT", "ECL2"]], "mammalian": [[205, "V", 206, "V", "TTCVLNDPN", "ECL2"]]}
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
#print(m.start(), m.group())
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
mutations_all.append([m.start()+1,"Q",'','',matches_seq[i],residues[m.start()+1]])
#print('mamalian')
#p = re.compile("[TS]{2}[A-Z]{1,11}[N]", overlapped=True)
matches = re.finditer(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
matches_seq = re.findall(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
#matches = re.findall(r'(?=(\w\w))', seq)
#print(matches)
mutations_mammalian = []
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
if matches_seq[i][0]=="T":
pos0 = "V"
if matches_seq[i][1]=="T":
pos1 = "V"
if matches_seq[i][0]=="S":
pos0 = "A"
if matches_seq[i][1]=="S":
pos1 = "A"
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
mutations_mammalian.append([m.start()+1,pos0,m.start()+2,pos1,matches_seq[i],residues[m.start()+1]])
glyco = OrderedDict()
glyco['n-linked']= mutations_all
glyco['o-linked'] = mutations_mammalian
jsondata = glyco
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("glyco",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_icl3(request, slug, **response_kwargs):
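# Report ICL3 truncations seen in constructs related to the target (grouped by how closely related,
# via compare_family_slug): deletions falling between TM5 and TM6 are returned as offsets relative
# to the 5x50 and 6x50 positions, together with the structure state and any fusion protein.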
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM5/TM6 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
tm5_start = {}
tm5_end = {}
tm6_start = {}
tm6_end = {}
tm5_50 = {}
tm6_50 = {}
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM5').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm5_start[pc.protein.entry_name] = pc.start
tm5_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM6').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm6_start[pc.protein.entry_name] = pc.start
tm6_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__generic_number__label__in=['5x50','6x50']).annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm5_50[pc.protein.entry_name] = pc.start
tm6_50[pc.protein.entry_name] = pc.end
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
states = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
if pdb not in states:
states[pdb] = state
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
#print(pdb,deletion.start,deletion.end)
if deletion.start > tm5_start[entry_name] and deletion.start < tm6_end[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
#deletions[entry_name][pdb] = [tm5_end[entry_name],tm6_start[entry_name],deletion.start,deletion.end,deletion.start-tm5_end[entry_name],tm6_start[entry_name]-deletion.end]
deletions[d_level_name][entry_name][pdb] = [deletion.start-tm5_50[entry_name]-1,tm6_50[entry_name]-deletion.end-1,state,str(fusion),f_protein]
# if (str(fusion)=='icl3'):
# print(entry_name,pdb,50+deletion.start-tm5_50[entry_name],50-(tm6_50[entry_name]-deletion.end-1),str(fusion),f_protein)
# for pdb,state in sorted(states.items()):
# print(pdb,"\t",state)
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("icl3",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_icl2(request, slug, **response_kwargs):
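# Same as json_icl3 but for ICL2: deletions between TM3 and TM4, reported relative to 3x50 and 4x50.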
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM3/TM4 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
tm3_start = {}
tm3_end = {}
tm4_start = {}
tm4_end = {}
tm3_50 = {}
tm4_50 = {}
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM3').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm3_start[pc.protein.entry_name] = pc.start
tm3_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM4').annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm4_start[pc.protein.entry_name] = pc.start
tm4_end[pc.protein.entry_name] = pc.end
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__generic_number__label__in=['3x50','4x50']).annotate(start=Min('residue__sequence_number'), end=Max('residue__sequence_number'))
for pc in pconfs:
tm3_50[pc.protein.entry_name] = pc.start
tm4_50[pc.protein.entry_name] = pc.end
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
states = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
if pdb not in states:
states[pdb] = state
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
#print(pdb,deletion.start,deletion.end)
if deletion.start > tm3_start[entry_name] and deletion.start < tm4_end[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
#deletions[entry_name][pdb] = [tm5_end[entry_name],tm6_start[entry_name],deletion.start,deletion.end,deletion.start-tm5_end[entry_name],tm6_start[entry_name]-deletion.end]
deletions[d_level_name][entry_name][pdb] = [deletion.start-tm3_50[entry_name]-1,tm4_50[entry_name]-deletion.end-1,state,str(fusion),f_protein]
# for pdb,state in sorted(states.items()):
# print(pdb,"\t",state)
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("icl2",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_nterm(request, slug, **response_kwargs):
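# Report N-terminal truncations from related constructs: deletions ending before the start of TM1,
# with the remaining distance to TM1, structure state and fusion information.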
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE TM1 LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='TM1').annotate(start=Min('residue__sequence_number'))
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['1x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
tm1_start = {}
for pc in pconfs:
tm1_start[pc.protein.entry_name] = pc.start
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
if deletion.start < tm1_start[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
deletions[d_level_name][entry_name][pdb] = [deletion.start,deletion.end-1, tm1_start[entry_name]-deletion.end-1,state,str(fusion),f_protein]
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("nterm",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def json_cterm(request, slug, **response_kwargs):
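# Report C-terminal truncations from related constructs: deletions starting at or after the first
# C-terminal residue, with the offset from the C-terminus start, structure state and fusion information.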
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE C-TERM LOOKUP DATA
proteins = Construct.objects.all().values_list('protein', flat = True)
# pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'))
# cterm_start = {}
# for pc in pconfs:
# cterm_start[pc.protein.entry_name] = pc.start
# pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number')).values_list('protein__entry_name','start','residue__generic_number__label')
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['8x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).prefetch_related('protein').filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'))
cterm_start = {}
for pc in pconfs:
cterm_start[pc.protein.entry_name] = pc.start
cons = Construct.objects.all().prefetch_related('crystal', 'protein__family','deletions','structure__state','insertions__insert_type')
deletions = OrderedDict()
deletions['Receptor'] = {}
deletions['Receptor Family'] = {}
deletions['Ligand Type'] = {}
deletions['Class'] = {}
deletions['Different Class'] = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
p_level = p.family.slug
d_level, d_level_name = compare_family_slug(level,p_level)
if d_level==-1: continue
pdb = c.crystal.pdb_code
state = c.structure.state.slug
fusion, f_results, linkers = c.fusion()
if fusion:
f_protein = f_results[0][2]
else:
f_protein = ""
for deletion in c.deletions.all():
if deletion.start >= cterm_start[entry_name]:
if p.entry_name not in deletions[d_level_name]:
deletions[d_level_name][entry_name] = {}
deletions[d_level_name][entry_name][pdb] = [deletion.start,deletion.end, deletion.start-cterm_start[entry_name],state,str(fusion),f_protein]
jsondata = deletions
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cterm",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def thermostabilising(request, slug, **response_kwargs):
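# Map known thermostabilising mutations (from termo.xlsx for the target's class) onto the target via
# generic numbers. Results are grouped as: '1' same receptor (or the same gene in another species with
# a matching WT residue), '2' same position + mutant AA seen in more than one receptor, '3' same
# position + WT AA seen in more than one receptor, '4' single-receptor hits from the level-3 grouping.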
start_time = time.time()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment','display_generic_number','generic_number')
wt_lookup = {}
wt_lookup_pos = {}
for r in rs:
if r.generic_number:
gn = r.generic_number.label
wt_lookup[gn] = [r.amino_acid, r.sequence_number]
pos = r.sequence_number
wt_lookup_pos[pos] = [r.amino_acid]
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
path = os.sep.join([settings.DATA_DIR, 'structure_data', 'construct_data', 'termo.xlsx'])
d = parse_excel(path)
if c_level in d:
termo = d[c_level]
else:
termo = []
results = OrderedDict()
results['1'] = {}
results['2'] = {} #fixed mut
results['3'] = {} #fixed wt
for mut in termo:
gn = mut['GN']
mut_aa = mut['MUT']
wt_aa = mut['WT']
entry_name = mut['UniProt']
pos = int(mut['POS'])
pdb = mut['PDB']
if mut['Effect'] != 'Thermostabilising':
continue #only thermo!
if gn == "":
continue
if (entry_name == slug) or (entry_name.split('_')[0] == slug.split('_')[0] and wt_aa == wt_lookup[gn][0]):
if gn not in results['1']:
results['1'][gn] = {}
if mut_aa not in results['1'][gn]:
results['1'][gn][mut_aa] = {'pdbs':[], 'hits':0, 'wt':wt_lookup[gn]}
if mut['PDB'] not in results['1'][gn][mut_aa]['pdbs']:
results['1'][gn][mut_aa]['pdbs'].append(pdb)
results['1'][gn][mut_aa]['hits'] += 1
if gn:
if gn in wt_lookup:
if gn not in results['2']:
results['2'][gn] = {}
if mut_aa not in results['2'][gn]:
results['2'][gn][mut_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn]}
if entry_name not in results['2'][gn][mut_aa]['proteins']:
results['2'][gn][mut_aa]['proteins'].append(entry_name)
results['2'][gn][mut_aa]['hits'] += 1
if wt_lookup[gn][0] == wt_aa:
if gn not in results['3']:
results['3'][gn] = {}
if wt_aa not in results['3'][gn]:
results['3'][gn][wt_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn], 'muts':[]}
if entry_name not in results['3'][gn][wt_aa]['proteins']:
results['3'][gn][wt_aa]['proteins'].append(entry_name)
results['3'][gn][wt_aa]['hits'] += 1
if mut_aa not in results['3'][gn][wt_aa]['muts']:
results['3'][gn][wt_aa]['muts'].append(mut_aa)
temp = {}
for gn, vals1 in results['2'].items():
for mut_aa, vals2 in vals1.items():
if vals2['hits']>1:
if gn not in temp:
temp[gn] = {}
if mut_aa not in temp[gn]:
temp[gn][mut_aa] = vals2
#results['2'][gn].pop(mut_aa, None)
results['2'] = temp
temp_single = {}
temp = {}
for gn, vals1 in results['3'].items():
for mut_aa, vals2 in vals1.items():
if vals2['hits']>1:
if gn not in temp:
temp[gn] = {}
if mut_aa not in temp[gn]:
temp[gn][mut_aa] = vals2
#results['2'][gn].pop(mut_aa, None)
elif vals2['hits']==1:
if gn not in temp_single:
temp_single[gn] = {}
if mut_aa not in temp_single[gn]:
temp_single[gn][mut_aa] = vals2
results['3'] = temp
results['4'] = temp_single
jsondata = results
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("termo",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def structure_rules(request, slug, **response_kwargs):
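# Apply the hard-coded structure-based design rules (STRUCTURAL_RULES) for the target's class:
# rules whose WT requirement matches the target sequence are returned as suggested mutations,
# bucketed by the state ('active'/'inactive') they are meant to stabilise ('all' goes into both).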
start_time = time.time()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment','display_generic_number','generic_number')
wt_lookup = {}
wt_lookup_pos = {}
for r in rs:
if r.generic_number:
gn = r.generic_number.label
wt_lookup[gn] = [r.amino_acid, r.sequence_number]
pos = r.sequence_number
wt_lookup_pos[pos] = [r.amino_acid]
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
if level.split("_")[0]=='001':
c_level = 'A'
elif level.split("_")[0]=='002':
c_level = 'B'
elif level.split("_")[0]=='003':
c_level = 'B'
elif level.split("_")[0]=='004':
c_level = 'C'
elif level.split("_")[0]=='006':
c_level = 'F'
else:
c_level = ''
# path = os.sep.join([settings.DATA_DIR, 'structure_data', 'construct_data', 'structure_rules.xlsx'])
# d = parse_excel(path)
# d_clean = {}
# regex = r"(\d+)x(\d+)"
# for rule_class, values in d.items():
# d_clean[rule_class] = []
# for rule in values:
# if rule['Type']!="Structure-based":
# # Only use structure based ones in this function
# continue
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# gn = match.group(1) + "x" + match.group(2)
# print(rule['Definition'],gn)
# else:
# continue
# regex = r"(\d+)x(\d+)"
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# rule['Generic Position'] = match.group(1) + "x" + match.group(2)
# else:
# continue
# d_clean[rule_class].append(rule)
# # print(d)
# print(json.dumps(d_clean,sort_keys=True, indent=4))
d = STRUCTURAL_RULES
# print(d)
if c_level in d:
rules = d[c_level]
else:
rules = []
results = OrderedDict()
results['active'] = {}
results['inactive'] = {} #fixed mut
for rule in rules:
# if rule['Type']!="Structure-based":
# # Only use structure based ones in this function
# continue
# regex = r"(\d+)x(\d+)"
# if re.search(regex, rule['Definition']):
# match = re.search(regex, rule['Definition'])
# gn = match.group(1) + "x" + match.group(2)
# print(rule['Definition'],gn)
# else:
# continue
gn = rule['Generic Position']
mut_aa = rule['Mut AA']
wt_aas = rule['Wt AA'].split("/")
definition = rule['Design Principle']+" "+rule['Addition / Removal']
state = rule['State'].lower()
valid = False
if gn in wt_lookup:
for wt_aa in wt_aas:
if wt_aa=='X' and wt_lookup[gn][0]!=mut_aa: #if universal but not mut aa
valid = True
elif wt_lookup[gn][0]==wt_aa:
valid = True
if valid:
mut = {'wt':wt_lookup[gn][0], 'gn': gn, 'pos':wt_lookup[gn][1], 'mut':mut_aa, 'definition':definition}
if state=='all':
if gn not in results['active']:
results['active'][gn] = []
if gn not in results['inactive']:
results['inactive'][gn] = []
results['active'][gn].append(mut)
results['inactive'][gn].append(mut)
else:
if gn not in results[state]:
results[state][gn] = []
results[state][gn].append(mut)
# entry_name = mut['UniProt']
# pos = int(mut['POS'])
# pdb = mut['PDB']
# if mut['Effect'] != 'Thermostabilising':
# continue #only thermo!
# if entry_name == slug:
# if gn not in results['1']:
# results['1'][gn] = {}
# if mut_aa not in results['1'][gn]:
# results['1'][gn][mut_aa] = {'pdbs':[], 'hits':0, 'wt':wt_lookup[gn]}
# if mut['PDB'] not in results['1'][gn][mut_aa]['pdbs']:
# results['1'][gn][mut_aa]['pdbs'].append(pdb)
# results['1'][gn][mut_aa]['hits'] += 1
# if gn:
# if gn in wt_lookup:
# if gn not in results['2']:
# results['2'][gn] = {}
# if mut_aa not in results['2'][gn]:
# results['2'][gn][mut_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn]}
# if entry_name not in results['2'][gn][mut_aa]['proteins']:
# results['2'][gn][mut_aa]['proteins'].append(entry_name)
# results['2'][gn][mut_aa]['hits'] += 1
# if wt_lookup[gn][0] == wt_aa:
# if gn not in results['3']:
# results['3'][gn] = {}
# if wt_aa not in results['3'][gn]:
# results['3'][gn][wt_aa] = {'pdbs':[], 'proteins':[], 'hits':0, 'wt':wt_lookup[gn], 'muts':[]}
# if entry_name not in results['3'][gn][wt_aa]['proteins']:
# results['3'][gn][wt_aa]['proteins'].append(entry_name)
# results['3'][gn][wt_aa]['hits'] += 1
# if mut_aa not in results['3'][gn][wt_aa]['muts']:
# results['3'][gn][wt_aa]['muts'].append(mut_aa)
# temp = {}
# for gn, vals1 in results['2'].items():
# for mut_aa, vals2 in vals1.items():
# if vals2['hits']>1:
# if gn not in temp:
# temp[gn] = {}
# if mut_aa not in temp[gn]:
# temp[gn][mut_aa] = vals2
# #results['2'][gn].pop(mut_aa, None)
# results['2'] = temp
# temp = {}
# for gn, vals1 in results['3'].items():
# for mut_aa, vals2 in vals1.items():
# if vals2['hits']>1:
# if gn not in temp:
# temp[gn] = {}
# if mut_aa not in temp[gn]:
# temp[gn][mut_aa] = vals2
# #results['2'][gn].pop(mut_aa, None)
# results['3'] = temp
jsondata = results
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("rules",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def mutations(request, slug, **response_kwargs):
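# Main mutation-suggestion endpoint. Aggregates, per generic number: thermostabilising mutations
# from constructs of the same class, conservation-based substitutions (receptor family, class and
# crystallised-protein consensus), helix-propensity G/P removals, state-switch rules
# (STRUCTURAL_SWITCHES), and glycosylation/palmitoylation site removals. Returns a dict keyed by
# '<wt><pos><mut>' with the matched rule definitions and the best (lowest) priority.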
from django.db import connection
start_time = time.time()
protein = Protein.objects.get(entry_name=slug)
protein_class_slug = protein.family.slug.split("_")[0]
protein_rf_name = protein.family.parent.name
protein_rf_slug = protein.family.parent.slug
protein_rf_count = ProteinFamily.objects.filter(parent__slug=protein_rf_slug).count()
# Grab thermostabilising mutations
key = "CD_all_thermo_mutations_class_%s" % protein_class_slug
mutations = cache.get(key)
if not mutations:
mutations = []
mutations_thermo = ConstructMutation.objects.filter(effects__slug='thermostabilising', construct__protein__family__parent__parent__parent__slug=protein_class_slug).all()\
.prefetch_related(
# "construct__structure__state",
"residue__generic_number",
# "residue__protein_segment",
"construct__protein__family__parent__parent__parent",
"construct__crystal"
)
for mutant in mutations_thermo:
if not mutant.residue.generic_number:
continue
prot = mutant.construct.protein
p_receptor = prot.family.parent.name
real_receptor = prot.entry_name
pdb = mutant.construct.crystal.pdb_code
gn = mutant.residue.generic_number.label
mutations.append(([mutant.sequence_number,mutant.wild_type_amino_acid,mutant.mutated_amino_acid],real_receptor,pdb, p_receptor,gn))
cache.set(key,mutations,60*60*24)
# Build current target residue GN mapping
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__isnull=False).prefetch_related('generic_number', 'protein_segment')
# Build a dictionary to know how far a residue is from segment end/start
# Used for propensity removals
start_end_segments = {}
for r in rs:
if r.protein_segment.slug not in start_end_segments:
start_end_segments[r.protein_segment.slug] = {'start':r.sequence_number}
start_end_segments[r.protein_segment.slug]['end'] = r.sequence_number
wt_lookup = {}
GP_residues_in_target = []
for r in rs:
gn = r.generic_number.label
from_start = r.sequence_number-start_end_segments[r.protein_segment.slug]['start']
from_end = start_end_segments[r.protein_segment.slug]['end'] - r.sequence_number
wt_lookup[gn] = [r.amino_acid, r.sequence_number,r.protein_segment.slug, r.display_generic_number.label]
if r.amino_acid in ["G","P"] and from_start>=4 and from_end>=4:
# build a list of potential GP removals (ignore those close to helix borders)
GP_residues_in_target.append(gn)
# Go through all mutations and find groupings (common)
mutation_list = OrderedDict()
for mutation in mutations:
pos = mutation[0][0]
mut_wt = mutation[0][1]
mut_mut = mutation[0][2]
entry_name = mutation[1]
pdb = mutation[2]
family = mutation[3]
gn = mutation[4]
# First do the ones with same WT
full_mutation = "%s_%s_%s" % (gn,mut_wt,"X")
if gn in wt_lookup and wt_lookup[gn][0]==mut_wt:
# Only use those that have the same WT residue at GN
if full_mutation not in mutation_list:
mutation_list[full_mutation] = {'proteins':[], 'hits':0, 'mutation':[[],[]], 'wt':'', 'pdbs':[], 'protein_families': []}
entry_name = mutation[1].split("_")[0]
if entry_name not in mutation_list[full_mutation]['proteins']:
mutation_list[full_mutation]['proteins'].append(entry_name)
mutation_list[full_mutation]['hits'] += 1
mutation_list[full_mutation]['mutation'][0].append(mut_wt)
mutation_list[full_mutation]['mutation'][1].append(mut_mut)
if gn in wt_lookup:
mutation_list[full_mutation]['wt'] = wt_lookup[gn]
if family not in mutation_list[full_mutation]['protein_families']:
mutation_list[full_mutation]['protein_families'].append(family)
if pdb not in mutation_list[full_mutation]['pdbs']:
mutation_list[full_mutation]['pdbs'].append(pdb)
# Second, check those with same mutated AA
full_mutation = "%s_%s_%s" % (gn,"X",mut_mut)
if gn in wt_lookup and wt_lookup[gn][0]!=mut_mut:
if full_mutation not in mutation_list:
mutation_list[full_mutation] = {'proteins':[], 'hits':0, 'mutation':[[],[]], 'wt':'', 'pdbs':[], 'protein_families': []}
entry_name = mutation[1].split("_")[0]
if entry_name not in mutation_list[full_mutation]['proteins']:
mutation_list[full_mutation]['proteins'].append(entry_name)
mutation_list[full_mutation]['hits'] += 1
mutation_list[full_mutation]['mutation'][0].append(mut_wt)
mutation_list[full_mutation]['mutation'][1].append(mut_mut)
if gn in wt_lookup:
mutation_list[full_mutation]['wt'] = wt_lookup[gn]
if family not in mutation_list[full_mutation]['protein_families']:
mutation_list[full_mutation]['protein_families'].append(family)
if pdb not in mutation_list[full_mutation]['pdbs']:
mutation_list[full_mutation]['pdbs'].append(pdb)
# Go through the previous list and filter with rules and add rule matches
simple_list = OrderedDict()
mutation_list = OrderedDict(sorted(mutation_list.items(), key=lambda x: x[1]['hits'],reverse=True))
for gn, vals in mutation_list.items():
definition_matches = []
if gn.split("_")[1] == "X":
# The rules below only apply to mutations that share the same mutated AA
if slug.split("_")[0] in vals['proteins']:
# Check if same receptor
definition_matches.append([1,'same_receptor'])
elif protein_rf_name in vals['protein_families']:
# Check if same receptor receptor family
definition_matches.append([2,'same_receptor_family'])
elif len(vals['protein_families'])<2:
# If not same receptor or receptor family and not in two receptor families,
# it is just a single match on position used in B-F class
if protein_class_slug!='001':
# Class A requires two distinct receptor families, so the 'same_pos' rule only applies to classes B-F
definition_matches.append([4,'same_pos'])
if len(vals['protein_families'])>=2:
# If mutation is seen in >=2 receptor families
# Put this one outside the above logic, to allow multi definitions
definition_matches.append([4,'hotspot_mut'])
# # Check for membrane binding
# if 'K' in vals['mutation'][1] or 'R' in vals['mutation'][1]:
# if vals['wt'][0] not in ['R','K']:
# # Only if not R,K already
# definition_matches.append([2,'membrane_binding'])
# elif vals['wt'][0] in ['K'] and 'R' in vals['mutation'][1]:
# # If K
# definition_matches.append([3,'membrane_binding_weak'])
else:
# The rules below are for mutations sharing the same WT AA (but a different mutated AA)
if len(vals['protein_families'])>=2:
definition_matches.append([2,'hotspot_wt'])
elif protein_rf_name not in vals['protein_families']:
# if the receptor family does not match, check for a same-WT match (classes B-F only)
if protein_class_slug!='001':
# Class A requires two distinct receptor families, so the 'same_wt' rule only applies to classes B-F
definition_matches.append([3,'same_wt'])
if definition_matches:
min_priority = min(x[0] for x in definition_matches)
pos = vals['wt'][1]
wt_aa = vals['wt'][0]
segment = vals['wt'][2]
origin = {'pdbs': vals['pdbs'], 'protein_families': vals['protein_families'], 'proteins': vals['proteins'], 'hits':vals['hits']}
gpcrdb = gn.split("_")[0]
for mut_aa in set(vals['mutation'][1]):
if mut_aa!=wt_aa:
mut = {'wt_aa': wt_aa, 'segment': segment, 'pos': pos, 'gpcrdb':gpcrdb, 'mut_aa':mut_aa, 'definitions' : definition_matches, 'priority': min_priority, 'origin': [origin]}
key = '%s%s%s' % (wt_aa,pos,mut_aa)
# print(key,mut)
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += definition_matches
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
simple_list[key]['origin'].append(origin)
# TODO : overlay with other types of mutations, e.g. surfacing expressing
# Conservation rules and Helix propensity rules
CONSERVED_RESIDUES = 'ADEFIJLMNQSTVY'
POS_RESIDUES = 'HKR'
if protein_rf_count>1:
# Only perform on RF families with more than one member
rf_conservation = calculate_conservation(slug=protein_rf_slug)
rf_cutoff = 7
rf_cutoff_pos = 4
rf_conservation_priority = 3
definition_matches = [rf_conservation_priority,'conservation_rf']
for cons_gn, aa in rf_conservation.items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=rf_cutoff and aa[0] in CONSERVED_RESIDUES): # or (int(aa[1])>=rf_cutoff_pos and aa[0] in POS_RESIDUES) # EXCLUDE POSITIVE RULE AT RF LEVEL
# differentiate between the two rules (positively charged vs other residues) as they require different conservation levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': rf_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# Apply helix propensity rule (P)
if cons_gn in GP_residues_in_target:
remove = False
if wt_lookup[cons_gn][0]=='P':
# If it is P then only change if ONLY P
if aa[2]['P'][0] == 1:
# if only one count of P (will be this P)
remove = True
# elif wt_lookup[cons_gn][0]=='G':
# print('it is G',aa[2]['G'])
# cut_offs = {'001':0.03, '002': 0.21, '003': 0.19, '004': 0.21 ,'006': 0.21}
# if protein_class_slug in cut_offs:
# cut_off = cut_offs[protein_class_slug]
# print('cutoff',cut_off,cut_off>aa[2]['G'][1])
# if cut_off>aa[2]['G'][1]:
# # if cut_off is larger than conserved fraction of G, then it can be removed
# remove = True
if remove:
rule = [3,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 3}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [rule]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
class_conservation = calculate_conservation(slug=protein_class_slug)
class_cutoff = 7
class_cutoff_pos = 4
class_conservation_priority = 3
definition_matches = [class_conservation_priority,'conservation_class']
for cons_gn, aa in class_conservation.items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=class_cutoff and aa[0] in CONSERVED_RESIDUES) or (int(aa[1])>=class_cutoff_pos and aa[0] in POS_RESIDUES):
# differentiate between the two rules (positively charged vs other residues) as they require different conservation levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': class_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# Apply helix propensity rule (P+G)
if cons_gn in GP_residues_in_target:
remove = False
if wt_lookup[cons_gn][0]=='P':
# If it is P then only change if ONLY P
if aa[2]['P'][0] == 1:
# if only one count of P (will be this P)
remove = True
elif wt_lookup[cons_gn][0]=='G':
cut_offs = {'001':0.03, '002': 0.21, '003': 0.19, '004': 0.21 ,'006': 0.21}
if protein_class_slug in cut_offs:
cut_off = cut_offs[protein_class_slug]
if cut_off>aa[2]['G'][1]:
# if cut_off is larger than conserved fraction of G, then it can be removed
remove = True
if remove:
rule = [3,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 3}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [rule]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# # Apply helix propensity rules from class when receptor family only has one member or non-classA
# if (protein_rf_count==1 or protein_class_slug!='001') and cons_gn in GP_residues_in_target:
# if not (wt_lookup[cons_gn][0]==aa[0] and int(aa[1])>5):
# rule = [2,"remove_unconserved_%s" % wt_lookup[cons_gn][0]]
# mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':'A', 'definitions' : [rule], 'priority': 2}
# key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],'A')
# if key not in simple_list:
# simple_list[key] = mut
# else:
# if rule not in simple_list[key]['definitions']:
# # Do not add this rule if it is already there (From RF check)
# simple_list[key]['definitions'] += [rule]
# min_priority = min(x[0] for x in simple_list[key]['definitions'])
# simple_list[key]['priority'] = min_priority
if protein_class_slug in ['001','002','003']:
# Only perform the xtal cons rules for A, B1 and B2
xtals_conservation = cache.get("CD_xtal_cons_"+protein_class_slug)
if not xtals_conservation:
c_proteins = Construct.objects.filter(protein__family__slug__startswith = protein_class_slug).all().values_list('protein__pk', flat = True).distinct()
xtal_proteins = Protein.objects.filter(pk__in=c_proteins)
if len(xtal_proteins)>0:
xtals_conservation = calculate_conservation(proteins=xtal_proteins)
cache.set("CD_xtal_cons_"+protein_class_slug,xtals_conservation,60*60*24)
xtals_cutoff = 7
xtals_cutoff_pos = 4
xtals_conservation_priority = 3
definition_matches = [xtals_conservation_priority,'conservation_xtals']
for cons_gn, aa in class_conservation.items():
if cons_gn in wt_lookup and wt_lookup[cons_gn][0]!=aa[0] and aa[0]!="+":
# If cons_gn exist in target but AA is not the same
if (int(aa[1])>=xtals_cutoff and aa[0] in CONSERVED_RESIDUES) or (int(aa[1])>=xtals_cutoff_pos and aa[0] in POS_RESIDUES):
# differentiate between the two rules (positively charged vs other residues) as they require different conservation levels
mut = {'wt_aa': wt_lookup[cons_gn][0], 'segment': wt_lookup[cons_gn][2], 'pos': wt_lookup[cons_gn][1], 'gpcrdb':cons_gn, 'mut_aa':aa[0], 'definitions' : [definition_matches], 'priority': xtals_conservation_priority}
key = '%s%s%s' % (wt_lookup[cons_gn][0],wt_lookup[cons_gn][1],aa[0])
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
# path = os.sep.join([settings.DATA_DIR, 'structure_data', 'Mutation_Rules.xlsx'])
# d = parse_excel(path)
# print(json.dumps(d,sort_keys=True, indent=4))
#print(wt_lookup)
for c, v in STRUCTURAL_SWITCHES.items():
match = False
if protein_class_slug in ['001'] and c=='A':
match = True
elif protein_class_slug in ['002','003'] and c=='B':
match = True
elif protein_class_slug in ['002'] and c=='B1':
match = True
elif protein_class_slug in ['003'] and c=='B2':
match = True
elif protein_class_slug in ['004'] and c=='C':
match = True
if match:
for r in v:
try:
aa_1 = [r['AA1 Pos'],r['Match AA1'],r['Inactive1'],r['Active1']]
aa_2 = [r['AA2 Pos'],r['Match AA2'],r['Inactive2'],r['Active2']]
prio = r['Prio']
motif = r['Motif']
match_1 = False
if r['Match AA1']=='X' or wt_lookup[aa_1[0]][0] in r['Match AA1']:
match_1 = True
match_2 = False
if r['Match AA2']=='X' or wt_lookup[aa_2[0]][0] in r['Match AA2']:
match_2 = True
# Only of the two positions are matched perform mutation
if match_1 and match_2:
# Active state version
# Is AA1 the same as WT?
active = []
if aa_1[3]!='Wt' and aa_1[3]!=wt_lookup[aa_1[0]][0]:
active.append([wt_lookup[aa_1[0]][0],aa_1[3],wt_lookup[aa_1[0]][1],aa_1[0]])
if aa_2[3]!='Wt' and aa_2[3]!=wt_lookup[aa_2[0]][0]:
active.append([wt_lookup[aa_2[0]][0],aa_2[3],wt_lookup[aa_2[0]][1],aa_2[0]])
inactive = []
if aa_1[2]!='Wt' and aa_1[2]!=wt_lookup[aa_1[0]][0]:
inactive.append([wt_lookup[aa_1[0]][0],aa_1[2],wt_lookup[aa_1[0]][1],aa_1[0]])
if aa_2[2]!='Wt' and aa_2[2]!=wt_lookup[aa_2[0]][0]:
inactive.append([wt_lookup[aa_2[0]][0],aa_2[2],wt_lookup[aa_2[0]][1],aa_2[0]])
# print(aa_1,wt_lookup[aa_1[0]],match_1)
# print(aa_2,wt_lookup[aa_2[0]],match_2)
# print("inactive",inactive,len(inactive))
definition_matches = [int(prio),motif]
muts = []
disable_double = True
if len(active)==1:
# print("active",active,len(active))
active = active[0]
mut = {'wt_aa': active[0], 'segment': wt_lookup[active[3]][2], 'pos': active[2], 'gpcrdb':active[3], 'mut_aa':active[1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'active_%s%s%s' % (active[0],active[2],active[1])
#print(key,mut)
muts.append([key,mut])
elif len(active)==2:
mut = {'wt_aa1': active[0][0], 'segment1': wt_lookup[active[0][3]][2], 'pos': active[0][2], 'gpcrdb1':active[0][3], 'mut_aa1':active[0][1],'wt_aa2': active[1][0], 'segment2': wt_lookup[active[1][3]][2], 'pos2': active[1][2], 'gpcrdb2':active[1][3], 'mut_aa2':active[1][1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'active_%s%s%s_%s%s%s' % (active[0][0],active[0][2],active[0][1],active[1][0],active[1][2],active[1][1])
#print(key,mut)
if not disable_double: muts.append([key,mut])
if len(inactive)==1:
# print("active",inactive,len(inactive))
inactive = inactive[0]
mut = {'wt_aa': inactive[0], 'segment': wt_lookup[inactive[3]][2], 'pos': inactive[2], 'gpcrdb':inactive[3], 'mut_aa':inactive[1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'inactive_%s%s%s' % (inactive[0],inactive[2],inactive[1])
#print(key,mut)
muts.append([key,mut])
elif len(inactive)==2:
mut = {'wt_aa1': inactive[0][0], 'segment1': wt_lookup[inactive[0][3]][2], 'pos': inactive[0][2], 'gpcrdb1':inactive[0][3], 'mut_aa1':inactive[0][1],'wt_aa2': inactive[1][0], 'segment2': wt_lookup[inactive[1][3]][2], 'pos2': inactive[1][2], 'gpcrdb2':inactive[1][3], 'mut_aa2':inactive[1][1], 'definitions' : [definition_matches], 'priority': int(prio)}
key = 'inactive_%s%s%s_%s%s%s' % (inactive[0][0],inactive[0][2],inactive[0][1],inactive[1][0],inactive[1][2],inactive[1][1])
# print(key,mut)
if not disable_double: muts.append([key,mut])
for mut in muts:
key = mut[0]
mut = mut[1]
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
except Exception as e:
print("problem with",r, e)
# GLYCO
seq = Protein.objects.filter(entry_name=slug).values_list('sequence', flat = True).get()
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related('protein_segment')
residues = {}
for r in rs:
residues[r.sequence_number] = r.protein_segment.slug
#No proline!
p = re.compile("N[^P][TS]")
matches = re.finditer(r'(?=([N][^P][TS]))',seq)
matches_seq = re.findall(r'(?=([N][^P][TS]))',seq)
#{"all": [[39, "Q", "", "", "NTS", "N-term"], [203, "Q", "", "", "NNT", "ECL2"]], "mammalian": [[205, "V", 206, "V", "TTCVLNDPN", "ECL2"]]}
definition_matches = [int(3),"n-linked glycosylation removal"]
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
#print(m.start(), m.group())
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
key = '%s%s%s' % ("N",m.start()+1,"Q")
mut = {'wt_aa': "N", 'segment': residues[m.start()+1], 'pos': m.start()+1, 'gpcrdb':'', 'mut_aa':"Q", 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
matches = re.finditer(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
matches_seq = re.findall(r'(?=([TS]{2}[A-Z]{1,10}[N]))',seq)
definition_matches = [int(3),"o-linked glycosylation removal"]
for i,m in enumerate(matches):
#print(matches_seq[i],m.start())
if matches_seq[i][0]=="T":
pos0 = "V"
if matches_seq[i][1]=="T":
pos1 = "V"
if matches_seq[i][0]=="S":
pos0 = "A"
if matches_seq[i][1]=="S":
pos1 = "A"
if residues[m.start()+1] in ['N-term','ECL1','ECL2','ECL3']:
key = '%s%s%s' % (matches_seq[i][0],m.start()+1,pos0)
mut = {'wt_aa': matches_seq[i][0], 'segment': residues[m.start()+1], 'pos': m.start()+1, 'gpcrdb':'', 'mut_aa':pos0, 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
key = '%s%s%s' % (matches_seq[i][1],m.start()+2,pos1)
mut = {'wt_aa': matches_seq[i][1], 'segment': residues[m.start()+1], 'pos': m.start()+2, 'gpcrdb':'', 'mut_aa':pos1, 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
#PALMI
definition_matches = [int(3),"palmitoylation removal"]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug,protein_segment__slug__in=['H8','C-term']).order_by('sequence_number').prefetch_related('protein_segment')
residues = {}
seq = ''
end_h8 = 0
start_h8 = 0
for r in rs:
if not start_h8 and r.protein_segment.slug == 'H8':
start_h8 = r.sequence_number
if not end_h8 and r.protein_segment.slug == 'C-term':
end_h8 = r.sequence_number-1 #end_h8 was prev residue
elif end_h8 and r.sequence_number-10>end_h8:
continue
seq += r.amino_acid
residues[r.sequence_number] = r.protein_segment.slug
# Cysteines in H8 / proximal C-terminus are potential palmitoylation sites
p = re.compile("C")
#print('all')
mutations_all = []
for m in p.finditer(seq):
key = '%s%s%s' % ("C",m.start()+start_h8,"A")
mut = {'wt_aa': "C", 'segment': residues[m.start()+start_h8], 'pos': m.start()+start_h8, 'gpcrdb':'', 'mut_aa':"A", 'definitions' : [definition_matches], 'priority': 3}
if key not in simple_list:
simple_list[key] = mut
else:
simple_list[key]['definitions'] += [definition_matches]
min_priority = min(x[0] for x in simple_list[key]['definitions'])
simple_list[key]['priority'] = min_priority
simple_list = OrderedDict(sorted(simple_list.items(), key=lambda x: (x[1]['priority'],x[1]['pos']) ))
for key, val in simple_list.items():
if val['gpcrdb']:
val['display_gn'] = wt_lookup[val['gpcrdb']][3]
else:
val['display_gn'] = ""
val['definitions'] = list(set([x[1] for x in val['definitions']]))
# print(val)
jsondata = simple_list
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
diff = round(time.time() - start_time,1)
print("muts",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_strucs(request, slug, **response_kwargs):
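# List positions where the target differs from the consensus (>50%) of class members that have
# crystal structures; values are [target AA, sequence number, consensus AA, conservation level].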
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE CONSENSUS DATA FROM CRYSTALLIZED CLASS MEMBERS
c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
xtal_proteins = Protein.objects.filter(pk__in=c_proteins)
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
potentials = cache.get("CD_xtal_"+level.split("_")[0])
if potentials==None:
a = Alignment()
a.load_proteins(xtal_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
cache.set("CD_xtal_"+level.split("_")[0],potentials,60*60*24)
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_strucs",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rf(request, slug, **response_kwargs):
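# Same idea as cons_strucs, but the consensus is computed over the human SWISSPROT members of the
# target's receptor family.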
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE RECEPTOR FAMILY CONSENSUS DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
print(len(rf_proteins))
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:3])).alignment)
except:
print('failed!')
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rf",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rf_and_class(request, slug, **response_kwargs):
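# List mismatches against the receptor family consensus, restricted to positions that also have a
# class-level consensus (>50%); values follow the same format as cons_rf.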
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE RECEPTOR FAMILY AND CLASS CONSENSUS DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:3])).alignment)
except:
print('failed!')
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
potentials2 = cache.get("CD_rfc_"+"_".join(level.split("_")[0:1]))
if potentials2==None:
class_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:1]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
try:
# Load alignment
a = pickle.loads(AlignmentConsensus.objects.get(slug="_".join(level.split("_")[0:1])).alignment)
except:
print('failed!')
a = Alignment()
a.load_proteins(class_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials2 = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
if int(aa[1])>5: # if conservation is >50%
potentials2[gn] = [aa[0],aa[1]]
cache.set("CD_rfc_"+"_".join(level.split("_")[0:1]),potentials2,60*60*24)
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid!=potentials[gn][0]:
if gn in potentials2:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
jsondata = json.dumps(results)
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rf_and_class",diff)
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def cons_rm_GP(request, slug, **response_kwargs):
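# Report G/P positions in the target relative to the receptor family consensus: 'non-conserved'
# where the consensus residue differs, and 'conserved' glycines (consensus also G) suggested as G->A.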
start_time = time.time()
level = Protein.objects.filter(entry_name=slug).values_list('family__slug', flat = True).get()
##PREPARE RECEPTOR FAMILY CONSENSUS DATA
#c_proteins = Construct.objects.filter(protein__family__slug__startswith = level.split("_")[0]).all().values_list('protein__pk', flat = True).distinct()
rf_proteins = Protein.objects.filter(family__slug__startswith="_".join(level.split("_")[0:3]), source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
amino_acids_stats = {}
amino_acids_groups_stats = {}
a = Alignment()
a.load_proteins(rf_proteins)
a.load_segments(align_segments) #get all segments to make correct diagrams
# build the alignment data matrix
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
s_id = 0
a_id = 0
for ns, segments in a.generic_numbers.items():
for s, num in segments.items():
for n, dn in num.items():
temp = []
temp2 = []
for i, aa in enumerate(AMINO_ACIDS):
temp.append(a.amino_acid_stats[i][s_id][a_id])
for i, aa in enumerate(AMINO_ACID_GROUPS):
temp2.append(a.feature_stats[i][s_id][a_id])
amino_acids_stats[n] = temp
amino_acids_groups_stats[n] = temp2
a_id += 1
s_id += 1
potentials = {}
for seg, aa_list in a.consensus.items():
for gn, aa in aa_list.items():
            if int(aa[1])>5: # if conservation is >50%
potentials[gn] = [aa[0],aa[1]]
rs = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=list(potentials.keys())).prefetch_related('protein_segment','display_generic_number','generic_number')
results = {}
results2 = {}
for r in rs:
gn = r.generic_number.label
if r.amino_acid in ['G','P']:
if r.amino_acid!=potentials[gn][0]:
results[gn] = [r.amino_acid, r.sequence_number,potentials[gn][0],potentials[gn][1]]
if r.amino_acid=='G' and potentials[gn][0]=='G':
results2[gn] = [r.amino_acid, r.sequence_number,'A',potentials[gn][1]]
jsondata = json.dumps({'non-conserved':results, 'conserved':results2})
response_kwargs['content_type'] = 'application/json'
end_time = time.time()
diff = round(end_time - start_time,1)
print("cons_rm_GP",diff)
return HttpResponse(jsondata, **response_kwargs)
def calculate_conservation(proteins = None, slug = None):
    # Return a dictionary of each generic number and the conserved residue and its frequency.
    # Can either be used on a list of proteins or on a slug. If a slug is given, the cached
    # alignment object is used. See the usage sketch after this function.
amino_acids_stats = {}
amino_acids_groups_stats = {}
if slug:
try:
# Load alignment
alignment_consensus = AlignmentConsensus.objects.get(slug=slug)
if alignment_consensus.gn_consensus:
alignment_consensus = pickle.loads(alignment_consensus.gn_consensus)
                # make sure it has this value, so it is the newest version of the cached consensus
test = alignment_consensus['1x50'][2]
return alignment_consensus
a = pickle.loads(alignment_consensus.alignment)
except:
print('no saved alignment')
proteins = Protein.objects.filter(family__slug__startswith=slug, source__name='SWISSPROT',species__common_name='Human')
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
a = Alignment()
a.load_proteins(proteins)
a.load_segments(align_segments)
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
alignment_consensus = None
elif proteins:
align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()
a = Alignment()
a.load_proteins(proteins)
a.load_segments(align_segments)
a.build_alignment()
# calculate consensus sequence + amino acid and feature frequency
a.calculate_statistics()
num_proteins = len(a.proteins)
# print(a.aa_count)
consensus = {}
for seg, aa_list in a.consensus.items():
for gn, aal in aa_list.items():
aa_count_dict = {}
for aa, num in a.aa_count[seg][gn].items():
if num:
aa_count_dict[aa] = (num,round(num/num_proteins,3))
if 'x' in gn: # only takes those GN positions that are actual 1x50 etc
consensus[gn] = [aal[0],aal[1],aa_count_dict]
if slug and alignment_consensus:
alignment_consensus.gn_consensus = pickle.dumps(consensus)
alignment_consensus.save()
return consensus
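# A minimal usage sketch for calculate_conservation (illustrative only; the slug value and the
# numbers shown are hypothetical, not taken from the code above):
#
#   consensus = calculate_conservation(slug='001_001_001')
#   # consensus maps generic numbers to [consensus amino acid, conservation value, per-AA counts],
#   # where the counts dict holds (count, fraction) tuples, e.g.:
#   # consensus['1x50'] == ['N', 9, {'N': (95, 0.95), 'S': (5, 0.05)}]
#
#   # Alternatively, pass an explicit protein queryset (this bypasses the cached alignment):
#   # consensus = calculate_conservation(proteins=Protein.objects.filter(family__slug__startswith='001'))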
| apache-2.0 | -2,259,260,584,328,600,800 | 43.386241 | 381 | 0.556964 | false |
sangeethah/validation-tests | tests/v3_validation/cattlevalidationtest/core/test_rancher_compose_commands.py | 1 | 20881 | from common_fixtures import * # NOQA
RCCOMMANDS_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/rccmds')
logger = logging.getLogger(__name__)
start_project_str = "Starting"
if_compose_data_files = pytest.mark.skipif(
not os.path.isdir(RCCOMMANDS_SUBDIR),
    reason='Rancher compose files directory location is not set or does not exist')
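# Note on the helper used throughout this module: launch_rancher_compose_from_file (imported from
# common_fixtures) is invoked below as (client, compose directory, docker-compose file, stack/env
# name, rancher-compose command, expected output substring, rancher-compose file). This summary is
# inferred from its usage in these tests, not from the helper's definition.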
@if_compose_data_files
def test_rancher_compose_create_service(client,
rancher_compose_container):
# This method tests the rancher compose create and up commands
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"create", "Creating stack", "rc1.yml")
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
assert service.scale == 3
assert service.name == "test1"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_stop(client,
rancher_compose_container):
# This method tests the rancher compose start and stop commands
# Bug #4887 has been filed
    # Bug #4933 has been filed: the start command gives no response, so the
    # "Started" response is checked instead. Should be changed if required.
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"stop", "Stopped", rancher_compose="rc1.yml")
    # Note: We add a sleep as the stop command does not wait until it completes
time.sleep(10)
service = client.wait_success(service)
# Confirm service is inactive and the containers are stopped
assert service.state == "inactive"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
# Check for containers being stopped
for container in container_list:
assert container.state == "stopped"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"start -d", "Started", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_down(client,
rancher_compose_container):
# This method tests the rancher compose start and down commands
env_name = random_str().replace("-", "")
    # Bug #4933 has been filed: the start command gives no response, so the
    # "Started" response is checked instead. Should be changed if required.
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"down", "Stopped", "rc1.yml")
# Note: We add a sleep as the down command does not wait until it completes
time.sleep(10)
service = client.wait_success(service)
# Confirm service is inactive and the containers are stopped
assert service.state == "inactive"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
# Check for containers being stopped
for container in container_list:
assert container.state == "stopped"
launch_rancher_compose_from_file(client, RCCOMMANDS_SUBDIR,
"dc1.yml", env_name,
"start -d", "Started", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart(client,
rancher_compose_container):
# This method tests the rancher compose restart command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"up -d", "Creating stack", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 1
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"restart", "Restarting", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 2
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
for container in container_list2:
assert container.state == "running"
assert container.startCount == 2
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart_bat_inter(client,
rancher_compose_container):
    # This method tests the restart command with the batch-size and interval options
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"up -d", "Creating stack", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 1
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"restart --batch-size 2 --interval 100", "Restarting", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 2
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
for container in container_list2:
assert container.state == "running"
assert container.startCount == 2
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_delete(client,
rancher_compose_container):
# This method tests the delete command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"rm -f", "Deleting", "rc1.yml")
# Confirm service is removed
service = client.wait_success(service, 300)
assert service.state == "removed"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_scale(client,
rancher_compose_container):
# This method tests the scale command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
# Issue a command to scale up the services
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"scale test1=4", "Setting scale", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
    # Check that the number of containers is incremented correctly
assert len(container_list) == 4
for container in container_list:
assert container.state == "running"
# Issue a command to scale down the services
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"scale test1=3", "Setting scale", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
    # Check that the number of containers is decremented correctly
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_security(client,
rancher_compose_container,
socat_containers):
# This method tests the options in security tab in the UI
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
"up -d", start_project_str, "rc3.yml")
env, service = get_env_service_by_name(client, env_name, "test3")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert inspect["HostConfig"]["Privileged"]
assert inspect["HostConfig"]["Memory"] == 104857600
assert inspect["HostConfig"]["CpuShares"] == 256
assert inspect["HostConfig"]["CapAdd"] == ["AUDIT_CONTROL",
"AUDIT_WRITE"]
assert inspect["HostConfig"]["CapDrop"] == ["BLOCK_SUSPEND",
"CHOWN"]
assert inspect["Config"]["Hostname"] == "rancherhost"
assert inspect["HostConfig"]["PidMode"] == "host"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_log_driver(client,
rancher_compose_container,
socat_containers):
    # This test case fails because of bug #4773
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
"up -d", start_project_str, "rc3.yml")
env, service = get_env_service_by_name(client, env_name, "test3")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running" + con.name)
assert inspect["State"]["Running"]
assert inspect["HostConfig"]["LogConfig"]["Type"] == "syslog"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_network(client,
rancher_compose_container,
socat_containers):
# This method tests the options in Network tab in the UI
hostname_override = "io.rancher.container.hostname_override"
requested_ip = "io.rancher.container.requested_ip"
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc4.yml", env_name,
"up -d", start_project_str, "rc4.yml")
env, service = get_env_service_by_name(client, env_name, "test4")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service,
{"testrc": "RANCHER_COMPOSE"}, 1)
check_config_for_service(client, service,
{"io.rancher.container.requested_ip":
"209.243.140.21"}, 1)
check_config_for_service(client, service,
{"io.rancher.container.hostname_override":
"container_name"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 2
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert inspect["Config"]["Domainname"] == "xyz.com"
assert \
inspect["Config"]["Labels"][hostname_override] \
== "container_name"
assert inspect["Config"]["Labels"][requested_ip] == "209.243.140.21"
dns_list = inspect["HostConfig"]["Dns"]
dnssearch_list = inspect["HostConfig"]["DnsSearch"]
assert "209.243.150.21" in dns_list
assert "www.google.com" in dnssearch_list
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_volume(client,
rancher_compose_container,
socat_containers):
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc5.yml", env_name,
"up -d", start_project_str, "rc5.yml")
env, service = get_env_service_by_name(client, env_name, "test5")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 2
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert "testvol:/home:rw" in inspect["HostConfig"]["Binds"]
delete_all(client, [env])
| apache-2.0 | 6,931,296,060,093,040,000 | 38.92543 | 79 | 0.640774 | false |
devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed/examiner/test_feedbackfeed_examiner_discuss.py | 1 | 50866 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils import timezone
from django.conf import settings
from model_bakery import baker
from devilry.devilry_comment import models as comment_models
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import devilry_group_baker_factories as group_baker
from devilry.devilry_group import models as group_models
from devilry.apps.core import models as core_models
from devilry.devilry_group.tests.test_feedbackfeed.mixins import mixin_feedbackfeed_examiner
from devilry.devilry_group.views.examiner import feedbackfeed_examiner
class MixinTestFeedbackfeedExaminerDiscuss(mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
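    # Shared tests for the examiner discuss views (public discussion and examiner/admin-only
    # notes): feedback-tab visibility, routing of posted comments to the latest feedback set,
    # deadline-move events, and grading info/history rendering.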
def test_get_examiner_first_attempt_feedback_tab_does_not_exist_if_last_feedbackset_is_published(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_first_attempt_feedback_tab_exist_if_last_feedbackset_is_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_new_attempt_feedback_tab_does_not_exist_if_last_feedbackset_is_published(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_new_attempt_published(
group=testgroup,
deadline_datetime=timezone.now() + timedelta(days=3))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_get_examiner_new_attempt_feedback_tab_exist_if_last_feedbackset_is_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-feedback-button'))
def test_post_comment_always_to_last_feedbackset(self):
assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
grading_system_plugin_id=core_models.Assignment
.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED)
group = baker.make('core.AssignmentGroup', parentnode=assignment)
examiner = baker.make('core.Examiner',
assignmentgroup=group,
relatedexaminer=baker.make('core.RelatedExaminer'))
feedbackset_first = group_baker.feedbackset_first_attempt_published(group=group)
feedbackset_last = group_baker.feedbackset_new_attempt_unpublished(group=group)
self.mock_http302_postrequest(
cradmin_role=examiner.assignmentgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': group.id},
requestkwargs={
'data': {
'text': 'This is a feedback',
'examiner_add_public_comment': 'unused value',
}
})
comments = group_models.GroupComment.objects.all()
self.assertEqual(len(comments), 1)
self.assertNotEqual(feedbackset_first, comments[0].feedback_set)
self.assertEqual(feedbackset_last, comments[0].feedback_set)
self.assertEqual(2, group_models.FeedbackSet.objects.count())
def test_event_deadline_moved_feedbackset_unpublished(self):
testgroup = baker.make('core.AssignmentGroup')
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
now1 = timezone.now()
new_deadline1 = now1 + timedelta(days=2)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now1,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline1)
now2 = timezone.now() + timedelta(days=2)
new_deadline2 = now2 + timedelta(days=4)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now2,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertEqual(mockresponse.selector.count('.devilry-group-feedbackfeed-event-message__deadline-moved'), 2)
self.assertEqual(mockresponse.selector.count('.deadline-move-info'), 2)
def test_event_deadline_moved_feedbackset_published(self):
testgroup = baker.make('core.AssignmentGroup')
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
testfeedbackset = group_baker.feedbackset_first_attempt_published(group=testgroup)
now1 = timezone.now()
new_deadline1 = now1 + timedelta(days=2)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now1,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline1)
now2 = timezone.now() + timedelta(days=2)
new_deadline2 = now2 + timedelta(days=4)
baker.make('devilry_group.FeedbackSetDeadlineHistory',
feedback_set=testfeedbackset,
changed_datetime=now2,
deadline_old=testfeedbackset.deadline_datetime,
deadline_new=new_deadline2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertEqual(mockresponse.selector.count('.devilry-group-feedbackfeed-event-message__deadline-moved'), 2)
self.assertEqual(mockresponse.selector.count('.deadline-move-info'), 2)
def test_get_feedbackset_header_grading_info_passed(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=1)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(mockresponse.selector.one('.header-grading-info').alltext_normalized, 'passed (1/1)')
def test_get_feedbackset_header_grading_info_failed(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=0)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(mockresponse.selector.one('.header-grading-info').alltext_normalized, 'failed (0/1)')
def test_get_feedbackset_header_buttons_not_graded(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-move-deadline-button').alltext_normalized,
'Move deadline')
self.assertFalse(mockresponse.selector.exists('.devilry-group-event__grade-last-edit-button'))
self.assertNotContains(mockresponse.response, 'Edit grade')
self.assertFalse(mockresponse.selector.exists('.devilry-group-event__grade-last-new-attempt-button'))
self.assertNotContains(mockresponse.response, 'Give new attempt')
def test_get_feedbackset_published_move_deadline_button_not_rendered(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertFalse(
mockresponse.selector.exists('.devilry-group-event__grade-move-deadline-button'))
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-last-edit-button').alltext_normalized,
'Edit grade')
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-last-new-attempt-button').alltext_normalized,
'Give new attempt')
def test_get_feedbackset_not_published_only_move_deadline_button_shows(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
)
self.assertEqual(
mockresponse.selector.one('.devilry-group-event__grade-move-deadline-button').alltext_normalized,
'Move deadline')
def test_get_feedbackset_grading_updated_multiple_events_rendered(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='[email protected]', fullname='Test User')
test_feedbackset = group_baker.feedbackset_first_attempt_published(group=testgroup, grading_points=1)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=1,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=0,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=1,
updated_by=testuser)
baker.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=test_feedbackset, old_grading_points=0,
updated_by=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup
)
event_text_list = [element.alltext_normalized for element in
mockresponse.selector.list('.devilry-group-event__grading_updated')]
self.assertEqual(len(event_text_list), 4)
self.assertIn('The grade was changed from passed (1/1) to failed (0/1) by Test User([email protected])', event_text_list[0])
self.assertIn('The grade was changed from failed (0/1) to passed (1/1) by Test User([email protected])', event_text_list[1])
self.assertIn('The grade was changed from passed (1/1) to failed (0/1) by Test User([email protected])', event_text_list[2])
self.assertIn('The grade was changed from failed (0/1) to passed (1/1) by Test User([email protected])', event_text_list[3])
class TestFeedbackfeedExaminerPublicDiscuss(TestCase, MixinTestFeedbackfeedExaminerDiscuss):
viewclass = feedbackfeed_examiner.ExaminerPublicDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_get_examiner_add_comment_button(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('#submit-id-examiner_add_public_comment'))
self.assertEqual(
'Add comment',
mockresponse.selector.one('#submit-id-examiner_add_public_comment').alltext_normalized
)
def test_get_examiner_form_heading(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-form-heading'))
self.assertEqual(
'Discuss with the student(s). Anything you write or upload here is visible to the student(s), '
'co-examiners (if any), and admins, but it is not considered part of your feedback/grading.',
mockresponse.selector.one('.devilry-group-feedbackfeed-form-heading').alltext_normalized
)
def test_post_comment_mail_sent_to_everyone_in_group_sanity(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner_email = baker.make('devilry_account.UserEmail', user=examiner.relatedexaminer.user,
email='[email protected]')
# Create two examiners with mails
examiner1 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner1_email = baker.make('devilry_account.UserEmail', user=examiner1.relatedexaminer.user,
email='[email protected]')
examiner2 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner2_email = baker.make('devilry_account.UserEmail', user=examiner2.relatedexaminer.user,
email='[email protected]')
# Create two students with mails
student1 = baker.make('core.Candidate', assignment_group=testgroup)
student1_email = baker.make('devilry_account.UserEmail', user=student1.relatedstudent.user,
email='[email protected]')
student2 = baker.make('core.Candidate', assignment_group=testgroup)
student2_email = baker.make('devilry_account.UserEmail', user=student2.relatedstudent.user,
email='[email protected]')
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(len(mail.outbox), 4)
recipient_list = []
for outbox in mail.outbox:
recipient_list.append(outbox.recipients()[0])
self.assertIn(examiner1_email.email, recipient_list)
self.assertIn(examiner2_email.email, recipient_list)
self.assertIn(student1_email.email, recipient_list)
self.assertIn(student2_email.email, recipient_list)
self.assertNotIn(examiner_email.email, recipient_list)
def test_post_first_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_first_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
class TestFeedbackfeedExaminerWithAdminDiscuss(TestCase, MixinTestFeedbackfeedExaminerDiscuss):
viewclass = feedbackfeed_examiner.ExaminerWithAdminsDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_get_examiner_add_comment_button(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('#submit-id-examiner_add_comment_for_examiners_and_admins'))
self.assertEqual(
'Add note',
mockresponse.selector.one('#submit-id-examiner_add_comment_for_examiners_and_admins').alltext_normalized
)
def test_get_examiner_form_heading(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-form-heading'))
self.assertEqual(
'Internal notes for this student or project group. Visible only to you, your co-examiners (if any) '
'and admins. Students can not see these notes.',
mockresponse.selector.one('.devilry-group-feedbackfeed-form-heading').alltext_normalized
)
def test_post_comment_mail_only_sent_to_examiners(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner_email = baker.make('devilry_account.UserEmail', user=examiner.relatedexaminer.user,
email='[email protected]')
# Create two examiners with mails
examiner1 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner1_email = baker.make('devilry_account.UserEmail', user=examiner1.relatedexaminer.user,
email='[email protected]')
examiner2 = baker.make('core.Examiner', assignmentgroup=testgroup)
examiner2_email = baker.make('devilry_account.UserEmail', user=examiner2.relatedexaminer.user,
email='[email protected]')
# Create two students with mails
student1 = baker.make('core.Candidate', assignment_group=testgroup)
student1_email = baker.make('devilry_account.UserEmail', user=student1.relatedstudent.user,
email='[email protected]')
student2 = baker.make('core.Candidate', assignment_group=testgroup)
student2_email = baker.make('devilry_account.UserEmail', user=student2.relatedstudent.user,
email='[email protected]')
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(len(mail.outbox), 2)
recipient_list = []
for outbox in mail.outbox:
recipient_list.append(outbox.recipients()[0])
self.assertIn(examiner1_email.email, recipient_list)
self.assertIn(examiner2_email.email, recipient_list)
self.assertNotIn(student1_email.email, recipient_list)
self.assertNotIn(student2_email.email, recipient_list)
self.assertNotIn(examiner_email.email, recipient_list)
def test_post_first_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_first_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
group_baker.feedbackset_first_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_unpublished_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_unpublished(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
def test_post_new_attempt_published_comment_with_text(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testfeedbackset = group_baker.feedbackset_new_attempt_published(group=testgroup)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
self.mock_http302_postrequest(
cradmin_role=testgroup,
requestuser=examiner.relatedexaminer.user,
viewkwargs={'pk': testgroup.id},
requestkwargs={
'data': {
'text': 'This is a comment',
}
})
self.assertEqual(2, group_models.FeedbackSet.objects.count())
last_feedbackset = group_models.FeedbackSet.objects.all()[1]
self.assertEqual(last_feedbackset, testfeedbackset)
self.assertEqual(1, group_models.GroupComment.objects.count())
posted_comment = group_models.GroupComment.objects.all()[0]
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
posted_comment.visibility)
self.assertEqual('This is a comment', posted_comment.text)
class TestFeedbackfeedPublicDiscussFileUploadExaminer(TestCase,
mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
viewclass = feedbackfeed_examiner.ExaminerPublicDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_comment_without_text_or_file_visibility_everyone(self):
        # Tests that an error message pops up when trying to post a comment without either text or a file.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.Examiner', assignmentgroup=testfeedbackset.group)
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_public_comment': 'unused value'
}
})
self.assertEqual(0, group_models.GroupComment.objects.count())
self.assertEqual(
'A comment must have either text or a file attached, or both. An empty comment is not allowed.',
mockresponse.selector.one('#error_1_id_text').alltext_normalized)
def test_upload_single_file_visibility_everyone(self):
# Test that a CommentFile is created on upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfile(
user=testexaminer.relatedexaminer.user)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(1, comment_models.CommentFile.objects.count())
def test_upload_single_file_content_visibility_everyone(self):
# Test the content of a CommentFile after upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile.txt', content=b'Test content', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, comment_models.CommentFile.objects.count())
comment_file = comment_models.CommentFile.objects.all()[0]
group_comment = group_models.GroupComment.objects.get(id=comment_file.comment.id)
self.assertEqual(group_comment.visibility, group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
self.assertEqual('testfile.txt', comment_file.filename)
self.assertEqual(b'Test content', comment_file.file.file.read())
self.assertEqual(len('Test content'), comment_file.filesize)
self.assertEqual('text/txt', comment_file.mimetype)
def test_upload_multiple_files_visibility_everyone(self):
# Test the content of CommentFiles after upload.
# Posting comment with visibility visible to everyone
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
def test_upload_multiple_files_contents_visibility_everyone(self):
# Test the content of a CommentFile after upload.
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
comment_file1 = comment_models.CommentFile.objects.get(filename='testfile1.txt')
comment_file2 = comment_models.CommentFile.objects.get(filename='testfile2.txt')
comment_file3 = comment_models.CommentFile.objects.get(filename='testfile3.txt')
# Check content of testfile 1.
self.assertEqual('testfile1.txt', comment_file1.filename)
self.assertEqual(b'Test content1', comment_file1.file.file.read())
self.assertEqual(len('Test content1'), comment_file1.filesize)
self.assertEqual('text/txt', comment_file1.mimetype)
# Check content of testfile 2.
self.assertEqual('testfile2.txt', comment_file2.filename)
self.assertEqual(b'Test content2', comment_file2.file.file.read())
self.assertEqual(len('Test content2'), comment_file2.filesize)
self.assertEqual('text/txt', comment_file2.mimetype)
# Check content of testfile 3.
self.assertEqual('testfile3.txt', comment_file3.filename)
self.assertEqual(b'Test content3', comment_file3.file.file.read())
self.assertEqual(len(b'Test content3'), comment_file3.filesize)
self.assertEqual('text/txt', comment_file3.mimetype)
def test_upload_files_and_comment_text(self):
# Test the content of a CommentFile after upload.
testfeedbackset = group_baker.feedbackset_first_attempt_published(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': 'Test comment',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(2, comment_models.CommentFile.objects.count())
self.assertEqual(1, group_models.GroupComment.objects.count())
group_comments = group_models.GroupComment.objects.all()
self.assertEqual('Test comment', group_comments[0].text)
class TestFeedbackfeedExaminerWithAdminDiscussFileUpload(TestCase,
mixin_feedbackfeed_examiner.MixinTestFeedbackfeedExaminer):
viewclass = feedbackfeed_examiner.ExaminerWithAdminsDiscussView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_comment_without_text_or_file_visibility_examiners_and_admins(self):
        # Tests that an error message pops up when trying to post a comment without either text or a file.
# Posting comment with visibility for examiners and admins only
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
}
})
self.assertEqual(0, group_models.GroupComment.objects.count())
self.assertEqual(
'A comment must have either text or a file attached, or both. An empty comment is not allowed.',
mockresponse.selector.one('#error_1_id_text').alltext_normalized)
def test_upload_single_file_visibility_examiners_and_admins(self):
# Test that a CommentFile is created on upload.
# Posting comment with visibility visible to examiners and admins
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfile(
user=testexaminer.relatedexaminer.user)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(1, comment_models.CommentFile.objects.count())
def test_upload_single_file_content_visibility_examiners_and_admins(self):
# Test the content of a CommentFile after upload.
# Posting comment with visibility visible to examiners and admins
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile.txt', content=b'Test content', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(1, comment_models.CommentFile.objects.count())
comment_file = comment_models.CommentFile.objects.all()[0]
self.assertEqual('testfile.txt', comment_file.filename)
self.assertEqual(b'Test content', comment_file.file.file.read())
self.assertEqual(len('Test content'), comment_file.filesize)
self.assertEqual('text/txt', comment_file.mimetype)
def test_upload_multiple_files_visibility_examiners_and_admins(self):
# Test the content of CommentFiles after upload.
        # Posting comment with visibility visible to examiners and admins
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_comment_for_examiners': 'unused value',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
def test_upload_multiple_files_contents_visibility_examiners_and_admins(self):
        # Test the contents of multiple CommentFiles after upload.
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
testexaminer = baker.make('core.examiner', assignmentgroup=testfeedbackset.group)
temporary_filecollection = group_baker.temporary_file_collection_with_tempfiles(
file_list=[
SimpleUploadedFile(name='testfile1.txt', content=b'Test content1', content_type='text/txt'),
SimpleUploadedFile(name='testfile2.txt', content=b'Test content2', content_type='text/txt'),
SimpleUploadedFile(name='testfile3.txt', content=b'Test content3', content_type='text/txt')
],
user=testexaminer.relatedexaminer.user
)
self.mock_http302_postrequest(
cradmin_role=testexaminer.assignmentgroup,
requestuser=testexaminer.relatedexaminer.user,
viewkwargs={'pk': testfeedbackset.group.id},
requestkwargs={
'data': {
'text': '',
'examiner_add_comment_for_examiners': 'unused value',
'temporary_file_collection_id': temporary_filecollection.id
}
})
self.assertEqual(1, group_models.GroupComment.objects.count())
self.assertEqual(group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
group_models.GroupComment.objects.all()[0].visibility)
self.assertEqual(3, comment_models.CommentFile.objects.count())
comment_file1 = comment_models.CommentFile.objects.get(filename='testfile1.txt')
comment_file2 = comment_models.CommentFile.objects.get(filename='testfile2.txt')
comment_file3 = comment_models.CommentFile.objects.get(filename='testfile3.txt')
# Check content of testfile 1.
self.assertEqual('testfile1.txt', comment_file1.filename)
self.assertEqual(b'Test content1', comment_file1.file.file.read())
self.assertEqual(len('Test content1'), comment_file1.filesize)
self.assertEqual('text/txt', comment_file1.mimetype)
# Check content of testfile 2.
self.assertEqual('testfile2.txt', comment_file2.filename)
self.assertEqual(b'Test content2', comment_file2.file.file.read())
self.assertEqual(len('Test content2'), comment_file2.filesize)
self.assertEqual('text/txt', comment_file2.mimetype)
# Check content of testfile 3.
self.assertEqual('testfile3.txt', comment_file3.filename)
self.assertEqual(b'Test content3', comment_file3.file.file.read())
self.assertEqual(len(b'Test content3'), comment_file3.filesize)
self.assertEqual('text/txt', comment_file3.mimetype)
| bsd-3-clause | 61,710,043,098,759,816 | 54.652079 | 131 | 0.657728 | false |
milliman/spark | python/pyspark/sql/tests.py | 1 | 107755 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
    Specifies a timezone as a fixed UTC offset (in hours).
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint, implemented purely in Python.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate a UDT implemented only in Python.
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
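        # An empty Row should have length zero.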
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
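        # SQLContexts created from the same SparkContext should share a single SparkSession.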
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def test_row_should_be_read_only(self):
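        # Rows are read-only: assigning to a field should raise an exception.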
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
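        # range() should handle empty, negative and stepped ranges.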
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
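        # With duplicate column names the Row values are accessible by position,
        # but column selection is ambiguous and raises AnalysisException.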
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
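        # explode() should expand array elements into rows and map entries into key/value columns.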
from pyspark.sql.functions import explode
d = [Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
def test_and_in_expression(self):
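        # Column conditions must use &, | and ~; Python's and, or and not raise ValueError.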
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
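        # A callable object (a class instance defining __call__) can be wrapped as a UDF.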
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
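        # A functools.partial object can be wrapped as a UDF.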
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
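        # Registered UDFs should be composable: nested calls and arithmetic on results should work in SQL.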
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
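        # A broadcast variable should be accessible from inside a registered UDF.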
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
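        # A UDF projection should work correctly together with orderBy and limit.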
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_wholefile_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
wholeFile=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_wholefile_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", wholeFile=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
    def test_udf_defers_judf_initialization(self):
# This is separate of UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
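        # Smoke test for count/collect/schema, caching flags and temp-view SQL.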
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
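        # Arithmetic, comparison, boolean and string operations on Columns should all return Columns.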
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
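        # stat.freqItems should report values occurring with at least the given minimum frequency.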
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
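        # first() and last() should skip nulls when ignorenulls is True.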
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
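        # stat.corr should compute the Pearson correlation of two columns.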
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
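        # stat.cov should compute the sample covariance of two columns.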
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
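        # between() should include both boundary values.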
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
with self.assertRaises(ValueError):
struct1 = StructType().add("name")
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
with self.assertRaises(KeyError):
not_a_field = struct1["f9"]
with self.assertRaises(IndexError):
not_a_field = struct1[9]
with self.assertRaises(TypeError):
not_a_field = struct1[9.9]
def test_metadata_null(self):
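        # StructField metadata of None, or containing None values, should not break createDataFrame.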
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with string
row = self.spark.createDataFrame([(None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
# We can't include this test in SQLTests because we will stop the class's SparkContext and
# cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initalize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(dateCol)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 | 1,648,245,848,236,969,200 | 42.07517 | 100 | 0.58984 | false |
sideeffects/pycparser | pycparser/c_parser.py | 1 | 60688 | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2013, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
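# For illustration (example added here, not from the original comment):
# after "typedef int T;" at file scope, while parsing a function body that
# declares "int T;", the stack would look roughly like
# [{'T': True}, {'T': False}].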
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
#
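# For illustration (example added for clarity; shapes are approximate):
# for "int *c[5]" the declarator first yields TypeDecl('c'); the array
# modifier is tacked on to give ArrayDecl(TypeDecl('c')), and the pointer
# modifier is then spliced in just before the TypeDecl, ending up roughly
# as ArrayDecl(PtrDecl(TypeDecl('c'))) -- an array of 5 pointers to int.
#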
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
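# For illustration (example added for clarity; shapes are approximate):
# for "unsigned int x;" the declarator arrives as TypeDecl(declname='x',
# type=None) and typename as [IdentifierType(['unsigned']),
# IdentifierType(['int'])]; this method sets decl.name to 'x' and collects
# the specifiers into a single IdentifierType(['unsigned', 'int']).
#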
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogeneous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
# a declaration list, for old "K&R style" function definitions.
#
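# For illustration (example added for clarity), an old-style definition
# looks like:
#
#   int foo(a, b)
#       int a, b;
#   { return a + b; }
#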
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
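#
# For illustration (example added for clarity), the problematic input has
# the form:
#
#   typedef char* char_p;
#   char_p my_pointer;
#
# where the second line must already see 'char_p' as a TYPEID.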
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
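    # A hypothetical example of the case handled here:
    #
    #   typedef int T;
    #   void f(void) { int T = 5; }
    #
    # Inside f, 'int T' is swallowed into declaration_specifiers (T lexes as
    # TYPEID), so all that remains of the init declarator is '= 5'.
    #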
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
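    # For instance (illustrative only):
    #
    #   typedef int Arr[10];
    #   typedef int Arr[10];
    #
    # In the second line 'Arr' lexes as TYPEID and is grouped into
    # declaration_specifiers, leaving '[10]' to appear here as an abstract
    # declarator.
    #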
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
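    # Sketch of the situation this rule covers (assumed example):
    #
    #   typedef char TT;
    #   void foo(void) { char *TT; }
    #
    # Inside foo, '*TT' redeclares TT as a pointer variable, so the TYPEID
    # following the pointer is taken as the declared name.
    #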
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4],
            dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
listed_quals = [item if isinstance(item, list) else [item]
for item in [p[3],p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
            dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_6(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.PtrDecl(
quals=p[2] or [],
type=p[3] if len(p) > 3 else None,
coord=coord)
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
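        # For example (hypothetical, mirroring the TT example above):
        #
        #   typedef char TT;
        #   void foo(int TT);
        #
        # Here 'int TT' is grouped entirely into declaration_specifiers and
        # p[2] is None, which is what the check below detects.
        #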
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list brace_close
| brace_open initializer_list COMMA brace_close
"""
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
| bsd-3-clause | -4,062,740,907,788,481,500 | 35.515042 | 122 | 0.525145 | false |
cdoremus/udacity-python_web_development-cs253 | src/unit2/rot13/rot13_main.py | 1 | 2113 | '''
In order to be graded correctly for this homework, there are a few things
to keep in mind. We'll be grading your web app by POSTing to your form and
retrieving the text that has been encoded with ROT13. There are a few main
issues you need to keep in mind in order for this to work:
1. The textarea form element where the user inputs the text to encode must be
named 'text'. In other words, you must have 'textarea name="text"' for us to post to.
2. The form method must be POST, not GET.
3. You must enter the full url into the supplied textbox above, including the
path. For example, our example app is running at http://udacity-cs253.appspot.com/unit2/rot13,
but if we instead only entered http://udacity-cs253.appspot.com/ then the grading script would not work.
4. Don't forget to escape your output!
VIDEO NOTES:
Rot13 increments every letter by 13
When the shift passes the end of the alphabet, it wraps around to the beginning.
For instance, z becomes m
Rot13 encrypting a string that has been Rot13 encrypted gets the original string.
Case must be preserved
Punctuation must be preserved
Also preserve whitespace
Escape the HTML
Udacity Test site
http://udacity-cs253.appspot.com/unit2/rot13
My Production URL:
http://cdoremus-udacity-cs253.appspot.com/unit2/rot13
'''
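# Illustrative note (not part of the graded app): the same transformation is
# available via Python's codecs module, e.g.
#   import codecs
#   codecs.encode('Hello, z!', 'rot_13')   # -> 'Uryyb, m!'
# and applying it twice returns the original string.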
import os
import webapp2
from google.appengine.ext.webapp import template
from rot13 import Rot13
class Rot13MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
values = {'rot13_string':''}
path = os.path.join(os.path.dirname(__file__), 'rot13.html')
self.response.out.write(template.render(path, values))
def post(self):
self.response.headers['Content-Type'] = 'text/html'
text = self.request.get('text')
rot13 = Rot13()
encrypted = rot13.encrypt(text) # escaping done in template using 'escape' attribute
values = {'rot13_string':encrypted}
path = os.path.join(os.path.dirname(__file__), 'rot13.html')
self.response.out.write(template.render(path, values))
| apache-2.0 | 8,370,183,515,845,830,000 | 38.148148 | 104 | 0.723616 | false |
amd77/parker | inventario/migrations/0007_auto_20171014_1756.py | 1 | 1706 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventario', '0006_auto_20170930_1629'),
]
operations = [
migrations.CreateModel(
name='ComandoRemoto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(help_text='nombre del comando', max_length=100, null=True, blank=True)),
('comando', models.CharField(help_text='comando', max_length=100, null=True, blank=True)),
],
options={
'verbose_name': 'comando Remoto',
'verbose_name_plural': 'Comandos Remotos',
},
),
migrations.CreateModel(
name='NodoRemoto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(help_text=' url del demonio nameko', max_length=100, null=True, blank=True)),
('nombre', models.CharField(help_text='Nombre del demonio nameko', max_length=100, null=True, blank=True)),
('parking', models.ForeignKey(to='inventario.Parking')),
],
options={
'verbose_name': 'Nodo Remoto',
'verbose_name_plural': 'Nodos Remotos',
},
),
migrations.AddField(
model_name='comandoremoto',
name='nodoremoto',
field=models.ForeignKey(to='inventario.NodoRemoto'),
),
]
| gpl-2.0 | -877,057,221,647,865,600 | 37.772727 | 123 | 0.552755 | false |
magus424/powerline | powerline/lint/markedjson/reader.py | 1 | 3808 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import codecs
from powerline.lint.markedjson.error import MarkedError, Mark, NON_PRINTABLE
from powerline.lib.unicode import unicode
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code.
class ReaderError(MarkedError):
pass
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a file-like object with its `read` method returning `str`,
# Yeah, it's ugly and slow.
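	# Illustrative usage sketch (assumes `stream` is a file-like object
	# opened in binary mode, e.g. open('config.json', 'rb')):
	#   reader = Reader(stream)
	#   ch = reader.peek()       # current character
	#   reader.forward()         # advance one character
	#   mark = reader.get_mark() # position info for error reporting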
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.full_buffer = unicode('')
self.full_pointer = 0
self.raw_buffer = None
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.index = 0
self.line = 0
self.column = 0
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
self.update(1)
def peek(self, index=0):
try:
return self.buffer[self.pointer + index]
except IndexError:
self.update(index + 1)
return self.buffer[self.pointer + index]
def prefix(self, length=1):
if self.pointer + length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer + length]
def update_pointer(self, length):
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.full_pointer += 1
self.index += 1
if ch == '\n':
self.line += 1
self.column = 0
else:
self.column += 1
length -= 1
def forward(self, length=1):
if self.pointer + length + 1 >= len(self.buffer):
self.update(length + 1)
self.update_pointer(length)
def get_mark(self):
return Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer)
def check_printable(self, data):
match = NON_PRINTABLE.search(data)
if match:
self.update_pointer(match.start())
raise ReaderError(
'while reading from stream', None,
'found special characters which are not allowed',
Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer)
)
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
try:
data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
position = self.stream_pointer - len(self.raw_buffer) + exc.start
data, converted = self.raw_decode(self.raw_buffer[:exc.start], 'strict', self.eof)
self.buffer += data
self.full_buffer += data + '<' + str(ord(character)) + '>'
self.raw_buffer = self.raw_buffer[converted:]
self.update_pointer(exc.start - 1)
raise ReaderError(
'while reading from stream', None,
'found character #x%04x that cannot be decoded by UTF-8 codec' % ord(character),
Mark(self.name, self.line, self.column, self.full_buffer, position)
)
self.buffer += data
self.full_buffer += data
self.raw_buffer = self.raw_buffer[converted:]
self.check_printable(data)
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
| mit | 8,412,327,568,441,967,000 | 27 | 86 | 0.676996 | false |
lidavidm/sympy | sympy/liealgebras/type_f.py | 1 | 4555 | from sympy.core import Set, Dict, Tuple, Rational
from .cartan_type import Standard_Cartan
from sympy.matrices import Matrix
class TypeF(Standard_Cartan):
def __init__(self, n):
assert n == 4
Standard_Cartan.__init__(self, "F", 4)
def dimension(self):
"""
Returns the dimension of the vector space
V underlying the Lie algebra
Example
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.dimension()
4
"""
return 4
def basic_root(self, i, j):
"""
This is a method just to generate roots
        with a 1 in the ith position and a -1
        in the jth position.
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
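    # Illustrative example (not in the original source):
    #   TypeF(4).basic_root(0, 1) == [1, -1, 0, 0]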
def simple_root(self, i):
"""
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
This method returns the ith simple root of F_4
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.simple_root(3)
[0, 0, 0, 1]
"""
if i < 3:
            return self.basic_root(i-1, i)
if i == 3:
root = [0]*4
root[3] = 1
return root
if i == 4:
root = [Rational(-1, 2)]*4
return root
def positive_roots(self):
"""
        This method generates all the positive roots of
        F_4. This is half of all of the roots of F_4;
by multiplying all the positive roots by -1 we
get the negative roots.
Example
======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
k += 1
root = [Rational(1, 2)]*n
posroots[k] = root
for i in range(1, 4):
k += 1
root = [Rational(1, 2)]*n
root[i] = Rational(-1, 2)
posroots[k] = root
posroots[k+1] = [Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)]
posroots[k+2] = [Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)]
posroots[k+3] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
posroots[k+4] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(-1, 2)]
return posroots
def roots(self):
"""
Returns the total number of roots for F_4
"""
return 48
def cartan_matrix(self):
"""
Returns the Cartan matrix for F_4
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
m = Matrix( 4, 4, [2, -1, 0, 0, -1, 2, -2, 0, 0,
-1, 2, -1, 0, 0, -1, 2])
return m
def basis(self):
"""
Returns the number of independent generators of F_4
"""
return 52
def dynkin_diagram(self):
diag = "0---0=>=0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
| bsd-3-clause | -5,288,564,138,958,715,000 | 27.117284 | 91 | 0.478156 | false |
flailingsquirrel/asciimapper | OSMTileLoader.py | 1 | 3184 | #!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
import curses, time, sys, os, string, random, math
import pprint
from Wget import *
from TileMap import TileMap
from TileLoader import TileLoader
import img_to_aa
false = 0
true = 1
class OSMTileLoader( TileLoader):
def __init__(self, (sizeX, sizeY), url, cacheUrl ):
TileLoader.__init__( self, (sizeX, sizeY), cacheUrl )
self.baseUrl = url
self.mapChars = "....,;clodxkO.XNOM"
# end __init__
def fetchTile( self, x, y, z ):
tileArr = self.getEmptyTile()
pngFile = self.cacheUrl + "/%s/%s/%s.png" % ( z,x,y )
url = self.baseUrl + "/%s/%s/%s.png" % ( z,x,y )
args = [ '-x', url ]
wget( args )
# convert to ascii
row_ctr = 0
col_ctr = 0
img_text = img_to_aa.load_and_scale_image( pngFile, self.mapChars, width=self.sizeX, height=self.sizeY, grayscale=True )
for line in img_text:
for c in line:
tileArr[ row_ctr ][ col_ctr ] = c
col_ctr = col_ctr+1
row_ctr = row_ctr + 1
col_ctr = 0
return tileArr
  # end fetchTile
# end class OSMTileLoader
if __name__=="__main__":
#def __init__(self, (x,y,z), (sizeX, sizeY), kmlFile, cacheUrl ):
T = OSMTileLoader((55,55), "http://tile.openstreetmap.org", "tile.openstreetmap.org" )
print T.getTile( 0,0,1 )
| bsd-3-clause | -7,132,981,872,047,901,000 | 39.820513 | 125 | 0.605842 | false |
texttochange/vusion-backend | vusion/persist/content_variable/content_variable_table.py | 1 | 4240 | from vusion.persist import Model
class ContentVariableTable(Model):
MODEL_TYPE = 'content_variable_table'
MODEL_VERSION = '2'
fields= {
'name': {
'required': True
},
'columns': {
'required': True
},
'column-key-selection': {
'required': True
}
}
def upgrade(self, **kwargs):
if kwargs['model-version'] == '1':
kwargs['column-key-selection'] = 'auto'
kwargs['model-version'] = '2'
return kwargs
def validate_fields(self):
self._validate(self, self.fields)
def _find_indexes(self, match):
key1_indexes = self._get_indexes(self['columns'][0], match['key1'])
if not key1_indexes:
return None
if 'key3' in match:
key2_indexes = self._get_indexes(self['columns'][1], match['key2'])
if len(key1_indexes & key2_indexes) == 0:
return None
row_index = (key1_indexes & key2_indexes).pop()
col_index = self._get_column_index(match['key3'])
else:
row_index = key1_indexes.pop()
col_index = self._get_column_index(match['key2'])
if col_index is None or row_index is None:
return None
return {'col_index': col_index, 'row_index': row_index}
def get_value(self, match):
indexes = self._find_indexes(match)
if indexes is None:
return None
return self._get_index_value(indexes['col_index'],
indexes['row_index'])
def set_value(self, match, value, upsert=True):
indexes = self._find_indexes(match)
if indexes is None:
if not upsert:
False
self._add_match(match)
indexes = self._find_indexes(match)
self._set_index_value(indexes['col_index'],
indexes['row_index'],
value)
return True
    ## Add the necessary column or row for a match.
def _add_match(self, match):
if 'key3' not in match:
if not self._get_indexes(self['columns'][0], match['key1']):
self._create_row(match['key1'], None)
if not self._get_column_index(match['key2']):
self._create_column(match['key2'])
else:
key1_indexes = self._get_indexes(self['columns'][0], match['key1'])
key2_indexes = self._get_indexes(self['columns'][1], match['key2'])
if len(key1_indexes & key2_indexes) == 0:
self._create_row(match['key1'], match['key2'])
if not self._get_column_index(match['key3']):
self._create_column(match['key3'])
def _create_column(self, key):
index = self._count_columns()
values = []
for i in range(0, self._count_rows()):
values.append(None)
self['columns'].append(
{'header': key,
'values': values,
'validation': None,
'type': 'contentvariable'})
def _create_row(self, key1, key2):
index = 1
self['columns'][0]['values'].append(key1)
if not key2 is None:
self['columns'][1]['values'].append(key2)
index = 2
for i in range(index, self._count_columns()):
self['columns'][i]['values'].append(None)
def _count_columns(self):
return len(self['columns'])
def _count_rows(self):
return len(self['columns'][0]['values'])
def _get_index_value(self, col_index, row_index):
return self['columns'][col_index]['values'][row_index]
def _set_index_value(self, col_index, row_index, value):
self['columns'][col_index]['values'][row_index] = value
def _get_indexes(self, column, key):
indexes = set()
for i,x in enumerate(column['values']):
if x == key:
indexes.add(i)
return indexes
def _get_column_index(self, key):
for i, col in enumerate(self['columns']):
if col['header'] == key:
return i
return None
| bsd-3-clause | -1,479,600,885,574,234,400 | 33.471545 | 79 | 0.519811 | false |
cfe-lab/MiCall | micall/tests/test_trim_fastqs.py | 1 | 17740 | import csv
import os
from io import BytesIO
from io import StringIO
import unittest
from pathlib import Path
import pytest
from micall.core.trim_fastqs import censor, trim, cut_all
from micall.utils.translation import reverse_and_complement
class CensorTest(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(str, self.assertMultiLineEqual)
self.original_bytes = b"""\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
ACGT
+
AAAA
"""
self.original_unicode = self.original_bytes.decode()
self.original_file = BytesIO(self.original_bytes)
self.bad_cycles = []
self.censored_file = StringIO()
self.summary_file = StringIO()
self.summary_writer = csv.DictWriter(self.summary_file,
['avg_quality', 'base_count'],
lineterminator=os.linesep)
self.summary_writer.writeheader()
def testNoBadCycles(self):
expected_text = self.original_unicode
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testBadCycle(self):
self.bad_cycles = [{'tile': '1101', 'cycle': '3'}]
expected_text = """\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
ACNT
+
AA#A
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testBadTail(self):
self.bad_cycles = [{'tile': '1101', 'cycle': '3'},
{'tile': '1101', 'cycle': '4'}]
expected_text = """\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
AC
+
AA
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testDifferentTile(self):
self.bad_cycles = [{'tile': '1102', 'cycle': '3'}]
expected_text = self.original_unicode
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testDifferentDirection(self):
""" Bad cycle doesn't match this read. """
self.original_bytes = b"""\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 2:N:0:9
ACGT
+
AAAA
"""
self.original_file = BytesIO(self.original_bytes)
self.bad_cycles = [{'tile': '1101', 'cycle': '3'}]
expected_text = self.original_bytes.decode()
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False,
cycle_sign=-1)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testReverseDirection(self):
self.original_bytes = b"""\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 2:N:0:9
ACGT
+
AAAA
"""
self.original_file = BytesIO(self.original_bytes)
self.bad_cycles = [{'tile': '1101', 'cycle': '-3'}]
expected_text = """\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 2:N:0:9
ACNT
+
AA#A
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False,
cycle_sign=-1)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testTwoReads(self):
self.original_bytes = b"""\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
ACGT
+
AAAA
@M01841:45:000000000-A5FEG:1:1102:1234:12345 1:N:0:9
TGCA
+
BBBB
"""
self.original_file = BytesIO(self.original_bytes)
self.bad_cycles = [{'tile': '1101', 'cycle': '2'},
{'tile': '1102', 'cycle': '3'}]
expected_text = """\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
ANGT
+
A#AA
@M01841:45:000000000-A5FEG:1:1102:1234:12345 1:N:0:9
TGNA
+
BB#B
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False)
self.assertEqual(expected_text, self.censored_file.getvalue())
def testSummary(self):
self.bad_cycles = [{'tile': '1101', 'cycle': '3'}]
expected_summary = """\
avg_quality,base_count
32.0,4
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False,
summary_writer=self.summary_writer)
self.assertEqual(expected_summary, self.summary_file.getvalue())
def testSummaryAverage(self):
self.original_bytes = b"""\
@M01841:45:000000000-A5FEG:1:1101:5296:13227 1:N:0:9
ACGT
+
AACC
"""
self.original_file = BytesIO(self.original_bytes)
self.bad_cycles = [{'tile': '1101', 'cycle': '3'}]
expected_summary = """\
avg_quality,base_count
33.0,4
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False,
summary_writer=self.summary_writer)
self.assertEqual(expected_summary, self.summary_file.getvalue())
def testSummaryEmpty(self):
self.original_bytes = b""
self.original_file = BytesIO(self.original_bytes)
expected_summary = """\
avg_quality,base_count
,0
"""
censor(self.original_file,
self.bad_cycles,
self.censored_file,
use_gzip=False,
summary_writer=self.summary_writer)
self.assertEqual(expected_summary, self.summary_file.getvalue())
def test_trim(tmpdir):
read1_content = 'TATCTACTAACTGTCGGTCTAC'
read2_content = reverse_and_complement(read1_content)
expected1 = build_fastq(read1_content)
expected2 = build_fastq(read2_content)
tmp_path = Path(str(tmpdir))
fastq1_path = tmp_path / 'read1.fastq'
fastq2_path = tmp_path / 'read2.fastq'
trimmed1_path = tmp_path / 'trimmed1.fastq'
trimmed2_path = tmp_path / 'trimmed2.fastq'
fastq1_path.write_text(expected1)
fastq2_path.write_text(expected2)
trim([fastq1_path, fastq2_path],
'no_bad_cycles.csv',
[str(trimmed1_path), str(trimmed2_path)],
use_gzip=False)
trimmed1 = trimmed1_path.read_text()
trimmed2 = trimmed2_path.read_text()
assert trimmed1 == expected1
assert trimmed2 == expected2
@pytest.mark.parametrize(
"scenario,read1,read2,expected1,expected2",
[
('no adapter',
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
'TGGAAGGGCTAATTCACTCCCAACG',
# unchanged
'CGTTGGGAGTGAATTAGCCCTTCCA',
# unchanged
),
('full adapters',
'TGGAAGGGCTAATTCACTCCCAACGCTGTCTCTTATACACATCTCCGAGCCCACGAGAC',
# REF ][ rev(ADAPTER2)
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGA',
# rev(REF) ][ rev(ADAPTER1)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('full adapters plus garbage',
'TGGAAGGGCTAATTCACTCCCAACGCTGTCTCTTATACACATCTCCGAGCCCACGAGACCAGTACGCA',
# REF ][ rev(ADAPTER2) ][ garbage
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGAAAGTAGCAAC',
# rev(REF) ][ rev(ADAPTER1) ][ garbage
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('partial adapters',
'TGGAAGGGCTAATTCACTCCCAACGCTGTCTCTTATACACATCTCCGAG',
# REF ][ partial rev(ADAPTER2)
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGC',
# rev(REF) ][ partial rev(ADAPTER1)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('partial adapters plus garbage',
'TGGAAGGGCTAATTCACTCCCAACGCTGTCTCTTATACACATCTCCGAGCCAGTACGCA',
# REF ][ partial rev(ADAPTER2)][ garbage
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCAAGTAGCAAC',
# rev(REF) ][ partial rev(ADAPTER1)][ garbage
'TGGAAGGGCTAATTCACTCCCAACGCTGTCTCTTATACACATCTCCGAGCCAGTACGCA',
# unchanged, because partial adapters only trimmed off the end
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCAAGTAGCAAC',
# unchanged
),
('no primers',
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
'TGGAAGGGCTAATTCACTCCCAACG',
# unchanged
'CGTTGGGAGTGAATTAGCCCTTCCA',
# unchanged
),
('full right primers',
'TGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAAAGATG',
# REF ][ rev(RIGHT)
'CATCTTTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCA',
# RIGHT ][ rev(REF)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('partial right primers',
'TGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAA',
# REF ][ partial rev(RIGHT)
'TTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCA',
# partial RIGHT ][ rev(REF)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('full left primers',
'ACCAACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# LEFT ][ REF
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGTTGGT',
# rev(REF) ][ rev(LEFT)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('partial left primers',
'ACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# partial LEFT ][ REF
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGG',
# rev(REF) ][ rev(partial LEFT)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('partial left primers plus garbage',
'CATAAGGATACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# garbage][ partial LEFT ][ REF
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGATCCTTATG',
# rev(REF) ][ rev(part LEFT) ][garbage
'CATAAGGATACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# unchanged
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGATCCTTATG',
# unchanged
),
('full left primers plus garbage',
'CATAAGGATACCAACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# garbage][ LEFT ][ REF
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGTTGGTATCCTTATG',
# rev(REF) ][ rev(LEFT) ][garbage
'CATAAGGATACCAACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# unchanged
'CGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGTTGGTATCCTTATG',
# unchanged
),
('full right primers plus garbage',
'TGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAAAGATGATGCACTT',
# REF ][ rev(RIGHT) ][garbage
'TACCGGACTCATCTTTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCA',
# garbage][ RIGHT ][ rev(REF)
'TGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAAAGATGATGCACTT',
# unchanged
'TACCGGACTCATCTTTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCA',
# unchanged
),
('left and right primers',
'ACCAACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAAAGATG',
# LEFT ][ REF ][ rev(RIGHT) ]
'CATCTTTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCAACAAGAGATCGAAAGTTGGTTGGT',
# RIGHT ][ rev(REF) ][ rev(LEFT)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('left primer in read 1 only',
'ACCAACCAACTTTCGATCTCTTGTTGGAAGGGCTAATTCACTCCCAACG',
# LEFT ][ REF
'CGTTGGGAGTGAATTAGCCCTTC',
# rev(REF) ]
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTC',
# rev(REF)
),
('right primers plus adapters',
'TGGAAGGGCTAATTCACTCCCAACGGAGGCACGTCAACATCTTAAAGATGCTGTCTCTTATACACATCTCCGAGCCCACGAGAC',
# REF ][ rev(RIGHT) ][ rev(ADAPTER2)
'CATCTTTAAGATGTTGACGTGCCTCCGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGA',
# RIGHT ][ rev(REF) ][ rev(ADAPTER1)
'TGGAAGGGCTAATTCACTCCCAACG',
# REF
'CGTTGGGAGTGAATTAGCCCTTCCA',
# rev(REF)
),
('primer dimer',
'TGGAAATACCCACAAGTTAATGGTTTAACAGGCACAGGTGTCTGTCTCTTATACACATCTCCGAGCCCACGAGACACTACCTGGAA',
# nCoV-2019_18_LEFT ] [ rev(ADAPTER2) ][ garbage
# [ rev(..._76_RIGHT) ]
'ACACCTGTGCCTGTTAAACCATTAACTTGTGGGTATTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGAAGGTTCTCAGGA',
# nCoV-2019_76_RIGHT ] [ rev(ADAPTER1) ][ garbage
# [ rev(nCoV-2019_18_LEFT) ]
'',
# Trimmed to nothing
'',
# Trimmed to nothing
),
('primer dimer with partial right match',
'TGGCTATTGATTATAAACACTACACACCCTGCACAAGAAAAGAACTTCACCTGTCTCTTATACACATCTCCGAGCCCACGAGACACTACCTGGAA',
# nCoV-2019_21_LEFT ] [ rev(ADAPTER2) ][ garbage ]
# [ rev(..._81_RIGHT) ] last 5 of 81_RIGHT match start of HCV_Pr3
'GTGAAGTTCTTTTCTTGTGCAGGGTGTGTAGTGTTTATAATCAATAGCCACTGTCTCTTATACACATCTGACGCTGCCGACGAAGGTTCTCAGGA',
# nCoV-2019_81_RIGHT ] [ rev(ADAPTER1) ][ garbage
# [ rev(nCoV-2019_21_LEFT) ]
'',
# Trimmed to nothing
'',
# Trimmed to nothing
),
('primer dimer with read error',
'TGGAAATACCCACAAGTTAATGGTTTAACAGGCAAAGGTGTCTGTCTCTTATACACATCTCCGAGCCCACGAGACACTACCTGGAA',
# nCoV-2019_18_LEFT ] [ rev(ADAPTER2) ][ garbage
# [ rev(..._76_RIGHT) ]
# read error ^
'ACACCTTTGCCTGTTAAACCATTAACTTGTGGGTATTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGAAGGTTCTCAGGA',
# nCoV-2019_76_RIGHT ] [ rev(ADAPTER1) ][ garbage
# [ rev(nCoV-2019_18_LEFT) ]
# ^ read error
'',
# Trimmed to nothing
'',
# Trimmed to nothing
),
('one empty read',
'TGGAAATACCCACAAGTTAATGGTTTAACCTGTCTCTTATACACATCTCCGAGCCCACGAGACACTACCTGGAA',
# nCoV-2019_18_LEFT ][ rev(ADAPTER2) ][ garbage
'CGTTGGGAGTGAATTAGCCCTTCCACTGTCTCTTATACACATCTGACGCTGCCGACGAAGGTTCTCAGGA',
# rev(REF) ][ rev(ADAPTER1) ][ garbage
'',
# Trimmed to nothing
'',
# Trimmed to nothing, because mate was.
),
('happens to match start of primer',
'TCGCCGACCTCATGGGGTACATACCGCTCGTCGGCGCCCCTCTTGGAGGC',
# HCV-1a ][ Still HCV-1a, start of nCoV-2019_1_RIGHT
'GCCTCCAAGAGGGGCGCCGACGAGCGGTATGTACCCCATGAGGTCGGCGA',
# ][ rev(HCV-1a)
'TCGCCGACCTCATGGGGTACATACCGCTCGTCGGCGCCCCTCTTGGAGGC',
# unchanged
'GCCTCCAAGAGGGGCGCCGACGAGCGGTATGTACCCCATGAGGTCGGCGA',
# unchanged
)
])
def test_cut_adapters(tmpdir: str,
scenario: str,
read1: str,
read2: str,
expected1: str,
expected2: str):
""" Cut adapter sequence from a read pair.
The reference section is pulled from the start of HXB2:
TGGAAGGGCTAATTCACTCCCAACG
Reverse complement of that is:
CGTTGGGAGTGAATTAGCCCTTCCA
Nextera Read 1 adapter:
TCGTCGGCAGCGTCAGATGTGTATAAGAGACAG
Reverse complement:
CTGTCTCTTATACACATCTGACGCTGCCGACGA
Nextera Read 2 adapter:
GTCTCGTGGGCTCGGAGATGTGTATAAGAGACAG
Reverse complement:
CTGTCTCTTATACACATCTCCGAGCCCACGAGAC
Left primer nCoV-2019_1_LEFT:
ACCAACCAACTTTCGATCTCTTGT
Reverse complement:
ACAAGAGATCGAAAGTTGGTTGGT
Right primer nCoV-2019_1_RIGHT (matches reverse complement of reference):
CATCTTTAAGATGTTGACGTGCCTC
Reverse complement (matches forward reference):
GAGGCACGTCAACATCTTAAAGATG
"""
tmp_path = Path(tmpdir)
fastq1_path = tmp_path / 'read1.fastq'
fastq2_path = tmp_path / 'read2.fastq'
trimmed1_path = tmp_path / 'trimmed1.fastq'
trimmed2_path = tmp_path / 'trimmed2.fastq'
fastq1_path.write_text(build_fastq(read1))
fastq2_path.write_text(build_fastq(read2))
expected_trimmed1 = build_fastq(expected1)
expected_trimmed2 = build_fastq(expected2)
cut_all(fastq1_path,
fastq2_path,
trimmed1_path,
trimmed2_path,
project_code='SARSCOV2')
assert trimmed1_path.read_text() == expected_trimmed1
assert trimmed2_path.read_text() == expected_trimmed2
def build_fastq(read_sequence):
if not read_sequence:
# Eliminate reads that get trimmed to nothing.
return ''
# Write two reads in the file to test primer dimer caching.
expected_quality1 = 'A' * len(read_sequence)
expected_trimmed1 = f'''\
@pair1
{read_sequence}
+
{expected_quality1}
@pair2
{read_sequence}
+
{expected_quality1}
'''
return expected_trimmed1
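# Illustrative sketch (not part of the original test module) of what build_fastq
# returns for a short, made-up sequence 'ACGT': two identical FASTQ records with
# all-'A' quality strings, so each scenario also exercises the primer-dimer cache.
#
#   build_fastq('ACGT') ==
#       '@pair1\nACGT\n+\nAAAA\n@pair2\nACGT\n+\nAAAA\n'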
| agpl-3.0 | -4,264,283,094,671,327,700 | 32.471698 | 104 | 0.603439 | false |
phantomii/restalchemy | examples/migrations/8d3025-2st-migration.py | 1 | 1163 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <[email protected]>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from restalchemy.storage.sql import migrations
class MigrationStep(migrations.AbstarctMigrationStep):
def __init__(self):
self._depends = ["bf4d04-1st-migration.py"]
@property
def migration_id(self):
return "8d302575-a1ce-43db-b312-e070e8d0cf7f"
def upgrade(self, session):
six.print_("upgrade 2st")
def downgrade(self, session):
six.print_("downgrade 2st")
migration_step = MigrationStep()
| apache-2.0 | -8,124,373,744,143,169,000 | 28.075 | 78 | 0.706793 | false |
brendangregg/bcc | tools/exitsnoop.py | 1 | 10349 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
from __future__ import print_function
import argparse
import ctypes as ct
import os
import platform
import re
import signal
import sys
from bcc import BPF
from datetime import datetime
from time import strftime
#
# exitsnoop Trace all process termination (exit, fatal signal)
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: exitsnoop [-h] [-x] [-t] [--utc] [--label[=LABEL]] [-p PID]
#
_examples = """examples:
exitsnoop # trace all process termination
exitsnoop -x # trace only fails, exclude exit(0)
exitsnoop -t # include timestamps (local time)
exitsnoop --utc # include timestamps (UTC)
exitsnoop -p 181 # only trace PID 181
exitsnoop --label=exit # label each output line with 'exit'
"""
"""
Exit status (from <include/sysexits.h>):
0 EX_OK Success
2 argparse error
70 EX_SOFTWARE syntax error detected by compiler, or
verifier error from kernel
77 EX_NOPERM Need sudo (CAP_SYS_ADMIN) for BPF() system call
The template for this script was Brendan Gregg's execsnoop
https://github.com/iovisor/bcc/blob/master/tools/execsnoop.py
More information about this script is in bcc/tools/exitsnoop_example.txt
Copyright 2016 Netflix, Inc.
Copyright 2019 Instana, Inc.
Licensed under the Apache License, Version 2.0 (the "License")
07-Feb-2016 Brendan Gregg (Netflix) Created execsnoop
04-May-2019 Arturo Martin-de-Nicolas (Instana) Created exitsnoop
13-May-2019 Jeroen Soeters (Instana) Refactor to import as module
"""
def _getParser():
parser = argparse.ArgumentParser(
description="Trace all process termination (exit, fatal signal)",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=_examples)
a=parser.add_argument
a("-t", "--timestamp", action="store_true", help="include timestamp (local time default)")
a("--utc", action="store_true", help="include timestamp in UTC (-t implied)")
a("-p", "--pid", help="trace this PID only")
a("--label", help="label each line")
a("-x", "--failed", action="store_true", help="trace only fails, exclude exit(0)")
# print the embedded C program and exit, for debugging
a("--ebpf", action="store_true", help=argparse.SUPPRESS)
# RHEL 7.6 keeps task->start_time as struct timespec, convert to u64 nanoseconds
a("--timespec", action="store_true", help=argparse.SUPPRESS)
return parser.parse_args
class Global():
parse_args = _getParser()
args = None
argv = None
SIGNUM_TO_SIGNAME = dict((v, re.sub("^SIG", "", k))
for k,v in signal.__dict__.items() if re.match("^SIG[A-Z]+$", k))
class Data(ct.Structure):
"""Event data matching struct data_t in _embedded_c()."""
_TASK_COMM_LEN = 16 # linux/sched.h
_pack_ = 1
_fields_ = [
("start_time", ct.c_ulonglong), # task->start_time, see --timespec arg
("exit_time", ct.c_ulonglong), # bpf_ktime_get_ns()
("pid", ct.c_uint), # task->tgid, thread group id == sys_getpid()
("tid", ct.c_uint), # task->pid, thread id == sys_gettid()
("ppid", ct.c_uint),# task->parent->tgid, notified of exit
("exit_code", ct.c_int),
("sig_info", ct.c_uint),
("task", ct.c_char * _TASK_COMM_LEN)
]
def _embedded_c(args):
"""Generate C program for sched_process_exit tracepoint in kernel/exit.c."""
c = """
EBPF_COMMENT
#include <linux/sched.h>
BPF_STATIC_ASSERT_DEF
struct data_t {
u64 start_time;
u64 exit_time;
u32 pid;
u32 tid;
u32 ppid;
int exit_code;
u32 sig_info;
char task[TASK_COMM_LEN];
} __attribute__((packed));
BPF_STATIC_ASSERT(sizeof(struct data_t) == CTYPES_SIZEOF_DATA);
BPF_PERF_OUTPUT(events);
TRACEPOINT_PROBE(sched, sched_process_exit)
{
struct task_struct *task = (typeof(task))bpf_get_current_task();
if (FILTER_PID || FILTER_EXIT_CODE) { return 0; }
struct data_t data = {
.start_time = PROCESS_START_TIME_NS,
.exit_time = bpf_ktime_get_ns(),
.pid = task->tgid,
.tid = task->pid,
.ppid = task->parent->tgid,
.exit_code = task->exit_code >> 8,
.sig_info = task->exit_code & 0xFF,
};
bpf_get_current_comm(&data.task, sizeof(data.task));
events.perf_submit(args, &data, sizeof(data));
return 0;
}
"""
# TODO: this macro belongs in bcc/src/cc/export/helpers.h
bpf_static_assert_def = r"""
#ifndef BPF_STATIC_ASSERT
#define BPF_STATIC_ASSERT(condition) __attribute__((unused)) \
extern int bpf_static_assert[(condition) ? 1 : -1]
#endif
"""
code_substitutions = [
('EBPF_COMMENT', '' if not Global.args.ebpf else _ebpf_comment()),
("BPF_STATIC_ASSERT_DEF", bpf_static_assert_def),
("CTYPES_SIZEOF_DATA", str(ct.sizeof(Data))),
('FILTER_PID', '0' if not Global.args.pid else "task->tgid != %s" % Global.args.pid),
('FILTER_EXIT_CODE', '0' if not Global.args.failed else 'task->exit_code == 0'),
('PROCESS_START_TIME_NS', 'task->start_time' if not Global.args.timespec else
'(task->start_time.tv_sec * 1000000000L) + task->start_time.tv_nsec'),
]
for old,new in code_substitutions:
c = c.replace(old, new)
return c
def _ebpf_comment():
"""Return a C-style comment with information about the generated code."""
comment=('Created by %s at %s:\n\t%s' %
(sys.argv[0], strftime("%Y-%m-%d %H:%M:%S %Z"), _embedded_c.__doc__))
args = str(vars(Global.args)).replace('{','{\n\t').replace(', ',',\n\t').replace('}',',\n }\n\n')
return ("\n /*" + ("\n %s\n\n ARGV = %s\n\n ARGS = %s/" %
(comment, ' '.join(Global.argv), args))
.replace('\n','\n\t*').replace('\t',' '))
def _print_header():
if Global.args.timestamp:
title = 'TIME-' + ('UTC' if Global.args.utc else strftime("%Z"))
print("%-13s" % title, end="")
if Global.args.label is not None:
print("%-6s" % "LABEL", end="")
print("%-16s %-6s %-6s %-6s %-7s %-10s" %
("PCOMM", "PID", "PPID", "TID", "AGE(s)", "EXIT_CODE"))
def _print_event(cpu, data, size): # callback
"""Print the exit event."""
e = ct.cast(data, ct.POINTER(Data)).contents
if Global.args.timestamp:
now = datetime.utcnow() if Global.args.utc else datetime.now()
print("%-13s" % (now.strftime("%H:%M:%S.%f")[:-3]), end="")
if Global.args.label is not None:
label = Global.args.label if len(Global.args.label) else 'exit'
print("%-6s" % label, end="")
age = (e.exit_time - e.start_time) / 1e9
print("%-16s %-6d %-6d %-6d %-7.2f " %
(e.task.decode(), e.pid, e.ppid, e.tid, age), end="")
if e.sig_info == 0:
print("0" if e.exit_code == 0 else "code %d" % e.exit_code)
else:
sig = e.sig_info & 0x7F
if sig:
print("signal %d (%s)" % (sig, signum_to_signame(sig)), end="")
if e.sig_info & 0x80:
print(", core dumped ", end="")
print()
# =============================
# Module: These functions are available for import
# =============================
def initialize(arg_list = sys.argv[1:]):
"""Trace all process termination.
arg_list - list of args, if omitted then uses command line args
arg_list is passed to argparse.ArgumentParser.parse_args()
For example, if arg_list = [ '-x', '-t' ]
args.failed == True
args.timestamp == True
Returns a tuple (return_code, result)
0 = Ok, result is the return value from BPF()
1 = args.ebpf is requested, result is the generated C code
os.EX_NOPERM: need CAP_SYS_ADMIN, result is error message
os.EX_SOFTWARE: internal software error, result is error message
"""
Global.argv = arg_list
Global.args = Global.parse_args(arg_list)
if Global.args.utc and not Global.args.timestamp:
Global.args.timestamp = True
if not Global.args.ebpf and os.geteuid() != 0:
return (os.EX_NOPERM, "Need sudo (CAP_SYS_ADMIN) for BPF() system call")
if re.match('^3\.10\..*el7.*$', platform.release()): # Centos/Red Hat
Global.args.timespec = True
for _ in range(2):
c = _embedded_c(Global.args)
if Global.args.ebpf:
return (1, c)
try:
return (os.EX_OK, BPF(text=c))
except Exception as e:
error = format(e)
if (not Global.args.timespec
and error.find('struct timespec')
and error.find('start_time')):
print('This kernel keeps task->start_time in a struct timespec.\n' +
'Retrying with --timespec')
Global.args.timespec = True
continue
return (os.EX_SOFTWARE, "BPF error: " + error)
except:
return (os.EX_SOFTWARE, "Unexpected error: {0}".format(sys.exc_info()[0]))
def snoop(bpf, event_handler):
"""Call event_handler for process termination events.
bpf - result returned by successful initialize()
event_handler - callback function to handle termination event
args.pid - Return after event_handler is called, only monitoring this pid
"""
bpf["events"].open_perf_buffer(event_handler)
while True:
bpf.perf_buffer_poll()
if Global.args.pid:
return
def signum_to_signame(signum):
"""Return the name of the signal corresponding to signum."""
return Global.SIGNUM_TO_SIGNAME.get(signum, "unknown")
# =============================
# Script: invoked as a script
# =============================
def main():
try:
rc, buffer = initialize()
if rc:
print(buffer)
sys.exit(0 if Global.args.ebpf else rc)
_print_header()
snoop(buffer, _print_event)
except KeyboardInterrupt:
print()
sys.exit()
return 0
if __name__ == '__main__':
main()
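# Usage sketch when importing exitsnoop as a module rather than running it as a
# script (the handler name below is only a placeholder):
#
#   import exitsnoop
#   rc, bpf = exitsnoop.initialize(['-x'])      # trace only non-zero exits
#   if rc == 0:
#       exitsnoop.snoop(bpf, my_event_handler)  # my_event_handler(cpu, data, size)
#   else:
#       print(bpf)                              # error message or generated C code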
| apache-2.0 | -4,594,911,471,573,904,400 | 36.361011 | 101 | 0.574935 | false |
airanmehr/Utils | Simulation.py | 1 | 40529 | '''
Copyleft Oct 10, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: [email protected]
'''
from __future__ import division
import numpy as np;
import pandas as pd;
np.set_printoptions(linewidth=140, precision=5, suppress=True)
import subprocess, uuid, os,sys
import pylab as plt
import UTILS.Util as utl
stdout_old=sys.stdout;sys.stdout=open('/dev/null','w');import simuPOP as sim;sys.stdout=stdout_old # to avoid simuPop welcome message!
def sig(x): return 1./(1+np.exp(-x));
def logit(p): return (np.inf if p==1 else np.log(p/(1.-p)))
a='';
def fff(msg):
global a
a += msg
class MSMS:
@staticmethod
def Simulate(n=200, mu=2*1e-9, L=50000, Ne=1e6,r=1e-9,verbose=False,seed=None,intPos=False):
L=int(L)
a= MSMS.Song(F=n, mu=mu, L=L, Ne=Ne, r=r,verbose=verbose,seed=seed)
c=pd.Series(a.columns)
if c.round().value_counts().max()==1:
a.columns=c.round().astype(int)
elif c.astype(int).value_counts().max()==1:
a.columns = c.astype(int)
if intPos:
a.columns=map(int,np.sort(np.random.choice(L, a.shape[1], replace=False)))
return a
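    # Minimal usage sketch (not from the original module; parameter values are
    # illustrative only):
    #
    #   haps = MSMS.Simulate(n=50, L=10000, Ne=1e4, mu=2e-9, r=1e-9, seed=1)
    #   haps.mean(0)      # derived-allele frequency at each segregating site
    #   haps.shape[1]     # number of segregating sites in the sample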
@staticmethod
def Song(F=200, mu=2*1e-9, L=50000, Ne=1e6,r=4e-9, uid=None, theta=None, msmsFile=None, dir=None,verbose=False,seed=None):
"""
        Everything is exactly the same.
"""
# print 'mu: {} r:{} NE:{} ,theta={} '.format(mu,r,Ne,4*Ne*mu*L), theta
if msmsFile is not None:
pop=MSMS.load(filename=msmsFile)[0]
else:
if theta:
pop=MSMS.MSMS(n=F, numReps=1, theta=theta, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
else:
pop=MSMS.MSMS(n=F, numReps=1, theta=2*Ne*mu*L, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
pop.r=r
pop.Ne=Ne
pop.L=L
return pop
@staticmethod
def MSMS(n, numReps, theta, rho, L, Ne=None,uid=None,oneMutationEvery=None, dir=dir,verbose=False,seed=None):
"""
Returns a list of dataframe for each replicate
"""
if dir is None:
dir= utl.PATH.simout;dir+= 'msms/';
os.system('mkdir -p ' +dir)
if oneMutationEvery is not None:
nSS=L/oneMutationEvery
theta=nSS/sum(1./np.arange(1,n))
if uid is None:
uid=str(uuid.uuid4())
unique_filename = dir+uid+'.msms'
if seed is None:
seed=''
else:
seed=' -seed {} '.format(seed)
cmd="java -jar -Xmx2g ~/bin/msms/lib/msms.jar -ms {} {} -t {:.0f} -r {:.0f} {:.0f} -oFP 0.000000000000E00 {} > {}".format(n, numReps, theta, rho, L, seed,unique_filename)
if verbose:
print cmd
subprocess.call(cmd,shell=True)
return MSMS.load(unique_filename)
@staticmethod
def getSeed(filename):
file=open(filename);cmd=np.array(file.readline().strip().split(' '));seed=file.readline().strip()
return seed
@staticmethod
def load(filename):
n, R, L, posUnderSelection = MSMS.getParams(open(filename).readline())
lines=np.array(map(str.strip,open(filename).readlines()) )
posIdx= np.where(map(lambda x: x[:len('positions:')]=='positions:',lines))[0]
try:
theta = lines[np.where(map(lambda x: 'ThetaW Estimate Summaray:' in x, lines))[0][0]].split(':')[1].strip()
except:
theta = None
POS=[map(lambda x: (float(x)*L), lines[ii].split()[1:]) for ii in posIdx]
dfs=[pd.DataFrame(map(list ,lines[i +1 +range(n)]),columns=pos ) for i,pos in zip(posIdx,POS)]
for df in dfs:
df[df!='0']=1
df[df=='0']=0
df.L = L
if posUnderSelection is not None:
df.posUnderSelection = posUnderSelection * L
if theta is not None:
df.stat = pd.Series(theta.split(), index=['W', 'Pi', 'D']).astype(float)
return dfs
@staticmethod
def getParams(line):
"""
Args:
params: takes the first line of msmsm file
Returns:
n,R,L: number of individuals in the sample, the number of the replicates, genome length
"""
params=np.array(line.strip().split(' '))
offset=np.where(map(lambda x: 'ms'in x, params))[0][0]
if params[offset+1] == '-N':
i=3
else:
i=1
posUnderSelection = None
if '-Sp' in params: posUnderSelection = float(params[np.where(params == '-Sp')[0][0] + 1])
return int(params[offset + i]), int(params[offset + i + 1]), int(
params[np.where(params == '-r')[0][0] + 2]), posUnderSelection
@staticmethod
def fixDuplicatePositions(pos,L):
pos=pd.Series(range(len(pos)),index=pos)
posHits=pos.index.value_counts()
invalidPOS=posHits[posHits>1]
if not invalidPOS.shape[0]:
return pos.index.values
for invalidPos in invalidPOS.index:
mini=pos.loc[invalidPos].min()
maxi=pos.loc[invalidPos].max()
lowerBound=pos[pos==mini-1].index.max()
upperBound=pos[pos==maxi+1].index.min();
if maxi==pos.shape[0]-1: upperBound=L
if mini==0: lowerBound=0
            validRange=np.arange((upperBound-lowerBound)/2) # keep only the middle (second and third) quartiles of the gap
            offset=validRange+validRange.shape[0]/2 # shift past the first quartile
newPos=pos.index.values;
newPos[mini:maxi+1]=np.sort(np.random.choice(offset,pos.loc[invalidPos].shape[0],replace=False))+lowerBound
pos.index=newPos
assert pos.index.value_counts().max()==1
return pos.index.values
@staticmethod
def Selection(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens, path):
seed = ''
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
# while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionFinale(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens,
path):
seed = ''
nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
while (nu0 < 0.9):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionNu(msms, Ne, n, numReplicates, theta, rho, window_size, s, posUnderSelection, nu, path=None):
seed = ''
if path is None: path = '~/tmp.msms'
fname = path + '{}.msms'.format(nu)
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SF 0 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, nu, posUnderSelection,
('-seed {}'.format(seed), '')[seed is ''], fname)
print cmd
os.system(cmd)
return MSMS.load(fname)
@staticmethod
def SelectionNuForward(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection,
gens, path):
nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(gen)
if (not ii) and s != 0:
while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
print nu0, gen, cmd
if not ii: seed = MSMS.getSeed(fname)
class Simulation:
@staticmethod
def setSeed(seed):
if seed is None: return
sim.setRNG('rand', seed + 1);
np.random.seed(seed)
@staticmethod
def load(ExperimentName, s=0.1, L=50000, experimentID=0, nu0=0.005, isFolded=False, All=False, startGeneration=0,
maxGeneration=50, numReplicates=3, numSamples=5, step=10, replicates=None, coverage=np.inf):
path='{}{}/simpop/'.format(utl.PATH.simout, ExperimentName) + Simulation.getSimulationName(s=s, L=L, experimentID=experimentID, initialCarrierFreq=nu0, isFolded=isFolded) + '.pkl'
sim= pd.read_pickle(path)
sim.savedPath=path
if replicates is not None: sim.setReplicates(sorted(replicates))
elif numReplicates is not None: sim.setReplicates(range(numReplicates))
if coverage != np.inf:
sim.Xi = sim.X
sim.X = sim.C.loc[coverage] / sim.D.loc[coverage].astype(float)
sim.X = np.array(map(lambda x: utl.roundto(x, 5), sim.X.reshape(-1) * 1e4)).reshape(sim.X.shape) / 1e4
sim.CD=sim.getCD(coverage)
sim.CD.columns.names=['REP','GEN','READ']
if not All: sim.setSamplingTimes(maxGeneration=min(maxGeneration,sim.getGenerationTimes()[-1]),numSamples=numSamples,step=step,startGeneration=startGeneration)
return sim
@staticmethod
def getSimulationName(s,L,experimentID,initialCarrierFreq,isFolded,msms=False):
if msms:
return 'L{:.0f}K.{:04.0f}'.format(L/1000,experimentID)
if s:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(np.round(float(initialCarrierFreq), 3), s, L / 1000,
experimentID, ('', '.Folded')[isFolded])
else:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(0, s * 100, L / 1000, experimentID,
('', '.Folded')[isFolded])
def setReplicates(self,replicates):
self.numReplicates=len(replicates)
self.X=self.X[:,:,replicates]
self.C = self.C.apply(lambda x: x[:, :, replicates])
self.D = self.D.apply(lambda x: x[:, :, replicates])
def __init__(self, outpath=utl.PATH.simout, N=1000, generationStep=10, maxGeneration=None,
s=0.05, r=4e-9, Ne=1e6, mu=2e-9, F=200, h=0.5, L=50000, startGeneration=0, numReplicates=3, H0=None,
foldInitialAFs=False, save=True, foutName=None,
doForwardSimulationNow=True, experimentID=-1,
msmsFile=None,initialCarrierFreq=0, ExperimentName=None, simulateNeutrallyFor=0,
initialNeutralGenerations=0, ignoreInitialNeutralGenerations=True,
makeSureSelectedSiteDontGetLost=True, onlyKeep=None, verbose=0, sampingTimes=None, minIncrease=0,
model=None,initDiploidPop=None,posUnderSelection=-1,haplotypes=False,seed=None,recombinator=None
):
"""
A General Simulation Class; with params
H0: Dataframe F x m for F individuals and m segregation sites ; Initial Haplotypes; dataframe with columns as positions
"""
self.recombinator=recombinator
if seed is not None:
Simulation.setSeed(seed)
self.s = s;
self.r = r;
self.Ne = Ne;
self.mu = mu;
self.F = F;
self.h = h;
self.L = int(L);
self.startGeneration = startGeneration;
self.numReplicates = numReplicates;
self.posUnderSelection = -1
self.initDiploidPop = initDiploidPop
self.initialCarrierFreq= initialCarrierFreq if initialCarrierFreq else 1./self.F
if foutName is not None:
self.uid=foutName
self.uidMSMS=None
elif experimentID>=0:
            self.uid=Simulation.getSimulationName(self.s, self.L, experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=foldInitialAFs)
            self.uidMSMS=Simulation.getSimulationName(self.s, self.L, experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=foldInitialAFs,msms=True)
else:
self.uid=str(uuid.uuid4())
self.uidMSMS=self.uid
if H0 is None:
self.simulateH0()
H0=self.H0
else:
self.setH0(H0);
if posUnderSelection >= 0:
if self.positions is None:
self.positions=map(int, self.initDiploidPop.lociPos())
self.set_posUnderSelection(posUnderSelection)
assert ExperimentName != None
self.save=save
self.model=model
self.minIncrease = minIncrease
self.samplingTimes=sampingTimes
self.initialNeutralGenerations=initialNeutralGenerations
self.onlyKeep=onlyKeep
self.makeSureSelectedSiteDontGetLost=makeSureSelectedSiteDontGetLost
self.ignoreInitialNeutralGenerations=ignoreInitialNeutralGenerations
self.msmsFile=msmsFile;self.outpath=outpath; self.outpath=outpath ; self.N=N; self.generationStep=generationStep; self.maxGeneration= maxGeneration;
self.foldInitialAFs=foldInitialAFs;self.doForwardSimulationNow=doForwardSimulationNow;self.experimentID=experimentID
self.simulateNeutrallyFor=simulateNeutrallyFor
self.setH0(H0);
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpath+=ExperimentName
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpathmsms=self.outpath+'/msms/';self.outpath+='/simpop/'
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
if not os.path.exists(self.outpathmsms) : os.makedirs(self.outpathmsms)
if self.maxGeneration is None: self.maxGeneration=Simulation.getFixationTime(self.s, Ne=self.F, roundto10=True)
self.theta=2*self.Ne*self.mu*self.L
self.pops=[]
if self.model is None:
import simuPOP.demography as dmg
self.model=dmg.LinearGrowthModel(T=self.maxGeneration, N0=self.N, NT=self.N)
if self.doForwardSimulationNow:
self.forwardSimulation()
@staticmethod
def simulateSingleLoci(nu0=0.005, T=100, s=0.1, N=1000,verbose=True,h=0.5,seed=None):
if verbose:
print '.',
step = 1
Simulation.setSeed(seed)
pop = sim.Population(size=N, ploidy=2, loci=[1],infoFields=['fitness']);sim.initGenotype(pop, prop=[1-nu0,nu0]);simulator = sim.Simulator(pop.clone(), rep=1);
# sim.stat(pop, alleleFreq=[0]); print pop.dvars().alleleFreq[0][1]
global a;a = "0;;{}\n".format(nu0)
simulator.evolve(initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=0, fitness={(0, 0): 1, (0, 1): 1 + s *h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(), postOps=[sim.Stat(alleleFreq=[0], step=step),
sim.PyEval("'%d;;' % (gen+1)", reps=0, step=step,
output=fff), sim.PyEval(
r"'{}\n'.format(map(lambda x: round(x[1],5),alleleFreq.values())[0])", step=step, output=fff)],
gen=T)
return pd.DataFrame(zip(*map(lambda x: x.split(';;'), a.strip().split('\n')))).T.set_index(0)[1].astype(float)
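    # Usage sketch (illustrative values): a single-locus selective sweep starting
    # at 0.5% frequency, returned as a pandas Series indexed by generation.
    #
    #   traj = Simulation.simulateSingleLoci(nu0=0.005, T=100, s=0.1, N=1000, seed=1)
    #   traj.iloc[-1]     # frequency of the beneficial allele after 100 generations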
def createInitialDiploidPopulation(self):
"""
        initHaps : np 2D array of shape m x nSS, where m is the number of individual haplotypes and nSS is the number of segregating sites
        returns a homozygous diploid population in which every haplotype is copied n times
"""
if self.initDiploidPop is not None: return self.initDiploidPop
        assert int(2*self.N/self.F)==2*self.N/float(self.F) # N should be a multiple of F
nSS=self.H0.shape[1];n=int(self.N/self.F)
try:
pop = sim.Population(size=self.N, ploidy=2, loci=nSS,lociPos=list(self.positions), infoFields='fitness')
except:
import traceback
print(traceback.format_exc())
print list(self.positions), nSS,n,self.H0.shape[0]
exit()
assert (self.N % self.H0.shape[0]) ==0
H= [[list(h.values),list(h.values)] for _ in range(n) for _,h in self.H0.iterrows()]
        for (i,h) in zip(pop.individuals(),H): # for each individual assign the first and second chromosome
i.setGenotype(h[0],0 );i.setGenotype(h[1],1 ) #homozygote population of diploid
# sim.stat(pop, alleleFreq=range(nSS));print np.array([pop.dvars().alleleFreq[x][1] for x in range(nSS)])
return pop
@staticmethod
def getGT(pop, i=None, pos=None):
if i == None and pos == None:
df = pd.concat([pd.DataFrame([list(i.genotype(0)) for i in pop.individuals()]),
pd.DataFrame([list(i.genotype(1)) for i in pop.individuals()])],
keys=[0, 1]).sort_index().reorder_levels([1, 0]).sort_index()
df.columns = map(int, pop.lociPos())
return df
i = np.where(np.array(pop.lociPos()).astype(int) == pos)[0][0]
a, b = [], []
for ind in pop.individuals():
a += [ind.genotype(0)[i]]
b += [ind.genotype(1)[i]]
return pd.concat([pd.Series(a), pd.Series(b)], keys=[0, 1]).reorder_levels([1, 0]).sort_index()
@staticmethod
def createDiploidPopulationFromDataFrame(df):
"""
        initHaps : np 2D array of shape m x nSS, where m is the number of individual haplotypes and nSS is the number of segregating sites
        returns a homozygous diploid population in which every haplotype is copied n times
"""
pop = sim.Population(size=df.shape[0]/2, ploidy=2, loci=df.shape[1], lociPos=list(df.columns), infoFields='fitness')
        for j,i in enumerate(pop.individuals()): # for each individual assign the first and second chromosome
i.setGenotype(df.loc[j].loc[0].tolist(),0 );i.setGenotype(df.loc[j].loc[1].tolist(),1 )
return pop
@staticmethod
def _simualtePop(pop, s=0, h=0.5, r=2e-8, siteUnderSelection=0,gen=1,recombinator=None,seed=None):
"Gets population and returns population"
Simulation.setSeed(seed)
simulator = sim.Simulator(pop.clone(), rep=1)
if recombinator is None:recombinator=sim.Recombinator(intensity=r)
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=recombinator),
gen=gen)
return simulator.population(0).clone()
@staticmethod
def _simualte(pop,s,h,r,siteUnderSelection,positions,startGeneration,generationStep,maxGeneration,model=None,makeSureSelectedSiteDontGetLost=True):
"Gets population and returns Dataframe, Static method"
N = int(pop.popSize())
if model is None:
import simuPOP.demography as dmg
model = dmg.LinearGrowthModel(T=maxGeneration, N0=N, NT=N)
simulator = sim.Simulator(pop.clone(), rep=1)
global a;a = ""
pops=[]
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=sim.Recombinator(intensity=r),subPopSize=model),
postOps=[sim.Stat(alleleFreq=range(int(pop.numLoci()[0])), step=step), sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
print a
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
print data
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, siteUnderSelection] or s == 0 or not makeSureSelectedSiteDontGetLost:
try:
pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(startGeneration/generationStep):,:]
else:
            return Simulation._simualte(pop,s,h,r,siteUnderSelection,positions,startGeneration,generationStep,maxGeneration,model,makeSureSelectedSiteDontGetLost)
def simualte(self):
"Gets population and returns Dataframe, Class method"
import simuPOP.demography as dmg
# model=dmg.ExponentialGrowthModel(T=50, N0=1000, NT=200)
simulator = sim.Simulator(self.initDiploidPop.clone(), rep=1)
# sim.dump(self.initDiploidPop)
global a;a = ""
if self.recombinator is None:
self.recombinator=sim.Recombinator(intensity=self.r)
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=self.siteUnderSelection, fitness={(0,0):1, (0,1):1+self.s*self.h, (1,1):1+self.s}),
matingScheme=sim.RandomMating(ops=self.recombinator,subPopSize=self.model),
postOps=[sim.Stat(alleleFreq=range(len(self.positions)), step=step),
sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = self.maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, self.siteUnderSelection] or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
try:
self.pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(self.startGeneration/self.generationStep):,:]
else:
# print pd.Series(data[:, self.siteUnderSelection])
return self.simualte()
def simulateH0(self):
self.H0=MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu,uid=self.uidMSMS)
def set_siteUnderSelection(self,x):
self.siteUnderSelection=x
self.posUnderSelection=self.positions[self.siteUnderSelection]
def set_posUnderSelection(self,x):
self.posUnderSelection=x
self.siteUnderSelection=np.where(self.positions==self.posUnderSelection)[0][0]
def setH0(self,H0):
self.H0=H0
self.positions=self.H0.columns.values
self.F=self.H0.shape[0]
def set_BeneficialLoci(self,selectionOnRandomSite=False,siteUnderSelection=None,posUnderSelection =None):
if selectionOnRandomSite:
self.set_siteUnderSelection(np.random.randint(0,self.H0.shape[1]))
elif siteUnderSelection is not None:
self.set_siteUnderSelection(siteUnderSelection)
elif posUnderSelection is not None:
self.set_siteUnderSelection(posUnderSelection)
else:
if not self.s:
self.set_siteUnderSelection(self.X0.argmax())
else:
sites=np.sort(np.where(self.X0== self.initialCarrierFreq)[0]);
if not len(sites):
sites=np.sort(np.where(( self.X0 <= self.initialCarrierFreq +0.025) & ( self.X0 >= self.initialCarrierFreq -0.025) ) [0]);
if not len(sites):
print 'Try again. No site at freq ',self.initialCarrierFreq, self.uid; return
self.set_siteUnderSelection(sites[np.random.randint(0,len(sites))])
def createInitHaps(self):
assignPositions=True
if self.H0 is None:
H0 = MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu, uid=self.uidMSMS,
msmsFile=self.msmsFile, dir=self.outpathmsms)
else:
H0 = self.H0
assignPositions=False
if self.foldInitialAFs:
idx = H0.mean(0) > 0.5
H0.iloc[:, idx.values] = 1 - H0.iloc[:, idx.values]
self.setH0(H0)
if assignPositions:
self.positions_msms = self.H0.columns.values.copy(True)
self.positions = sorted(np.random.choice(self.L, self.H0.shape[1], replace=False))
self.H0 = pd.DataFrame(self.H0.values, columns=self.positions)
self.X0 = self.H0.mean(0).values
def forwardSimulation(self):
"""
returns np 3D array T x nSS x R which T=|{t_1,t_2,..}| (nnumber of times), nSS is number of SS , and R is the number of replicates
"""
import numpy as np
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
if self.posUnderSelection<0 and self.initDiploidPop is None:
self.createInitHaps()
self.set_BeneficialLoci()
self.initDiploidPop=self.createInitialDiploidPopulation()
elif self.initDiploidPop is None:
self.createInitHaps()
self.initDiploidPop = self.createInitialDiploidPopulation()
# self.X0=self.H0.mean().values
else:
self.X0=Simulation.getGT(self.initDiploidPop).mean().values
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
# print pd.concat([df.mean(),self.H0.mean().reset_index(drop=True)],1)
self.X=np.array([self.simualte() for _ in range(self.numReplicates)]).swapaxes(0, 2).swapaxes(0, 1)
self.X=np.append(np.tile(self.X0[:,None],(1,self.X.shape[2]))[None,:,:],self.X,axis=0)
self.sampleDepths()
if self.save:
pd.to_pickle(self,self.outpath+self.uid+'.pkl')
# self.createDF()
def getGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(0,self.maxGeneration-self.startGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
def getTrueGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(self.startGeneration,self.maxGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
@staticmethod
def getFixationTime(s,Ne=200,roundto10=True):
if s==0: s=0.01
t=-4*int(logit(1./Ne)/s)
if roundto10:
return (t//10 +1)*10
else:
return t
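    # Worked example (added for clarity): with the defaults Ne=200 and s=0.1,
    # logit(1/200) is roughly -5.29, so t = -4*int(-5.29/0.1) = 208 generations,
    # which roundto10 bumps up to 210.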
@staticmethod
def sampleInitSamplingTime(s,Ne=200,phase=0,samplingWindow=50,startOfEpoch=False):
fix=Simulation.getFixationTime(s, Ne=Ne)
if phase==0: lower,upper=(0, fix-samplingWindow)
if phase==1: lower,upper=(0, fix/3-samplingWindow)
if phase==2: lower,upper=(fix/3, 2*fix/3-samplingWindow)
if phase==3: lower,upper=(2*fix/3, fix-samplingWindow)
if startOfEpoch:
rnd=lower
else:
rnd=np.random.randint(lower,max(lower,upper)+1)
return int(rnd)//10 *10
@staticmethod
def sampleStartTimesforAlls(samplingWindow=50):
S=[0.1, 0.05, 0.02, 0.01,0]
for phase in [1,2,3]:
pd.DataFrame([[Simulation.sampleInitSamplingTime(s, phase=phase, samplingWindow=samplingWindow, startOfEpoch=True) for _ in range(100)] for s in S], index=S).T.to_pickle('/home/arya/out/startSamplingTimes.phase{}.sampleWin{}.pkl'.format(phase, samplingWindow))
def setSamplingTimes(self,maxGeneration=None,numSamples=5,step=None,startGeneration=None):
GT=pd.Series(range(len(self.getTrueGenerationTimes(includeZeroGeneration=True))),index=self.getTrueGenerationTimes(includeZeroGeneration=True))
if startGeneration is not None: self.startGeneration=startGeneration
if maxGeneration is not None: self.maxGeneration = maxGeneration
if step is not None:self.generationStep=step
else: self.generationStep=(self.maxGeneration-self.startGeneration)/numSamples
i = GT.loc[self.getTrueGenerationTimes(includeZeroGeneration=True)[:self.X.shape[0]]].values
self.X = self.X[i, :, :]
self.C = self.C.apply(lambda x: x[i, :, :])
self.D = self.D.apply(lambda x: x[i, :, :])
self.X0=self.X[0,:,0]
@staticmethod
def getSamplingTimeBasedOnFreq(sim,phase,samplingWin=50):
carrier_freq=[0.1,0.5,0.9][phase-1]
a= np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0]
ft=sim.getTrueGenerationTimes().max()
if len(a):
t= sim.getTrueGenerationTimes()[np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0].min()]
else:
t=sim.getTrueGenerationTimes().max()
return min(t,ft-samplingWin)
@staticmethod
def Load(s=0.1, experimentID=0, nu0=0.005, numReplicates=3, step=10, ModelName='TimeSeries', samplingWindow=50,
L=50000, depthRate=30):
if not s: nu0=0.005
sim = Simulation.load(s=s, experimentID=experimentID % 100, nu0=nu0, numReplicates=numReplicates, step=step,
ExperimentName=ModelName, All=True, L=L, replicates=range(numReplicates),
coverage=depthRate)
sim.experimentID=experimentID
startGen=0
sim.setSamplingTimes(maxGeneration=min(startGen+samplingWindow,sim.getTrueGenerationTimes()[-1]),step=step,startGeneration=startGen)
sim.createDF()
return sim
def getHardSweepMutations(self):
MAF=1./self.H0.shape[0]
dups=self.H0[self.H0.duplicated()]
x0=pd.Series(self.X0, index=self.positions)
hard=[]
for _,dup in dups.iterrows():
numDup=self.H0.apply(lambda x:(x==dup).all(),axis=1).sum()
hard=np.append(hard, (dup*x0==numDup*MAF).replace({False:None}).dropna().index.values)
hard=np.sort(np.append(hard,(x0==MAF).replace({False:None}).dropna().index.values).astype(int))
return hard
@property
def df(self):
reps=range(self.numReplicates)
        # use a local variable; assigning to self.df would fail because df is a read-only property
        df=pd.concat([pd.DataFrame(self.X[:,:,r],columns=self.positions,index=pd.MultiIndex.from_product([[r],range(self.X.shape[0])],names=['REP','TIME'])).T for r in reps],axis=1)
        if self.numReplicates==1:
            df=df[0]
        return df
def computeCDi(self, EE, depthRate):
E = EE.loc[depthRate]
index = pd.Series(range(E.shape[0]), E.index)
C = pd.concat([pd.DataFrame(self.C.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
D = pd.concat([pd.DataFrame(self.D.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
self.cd = pd.concat([pd.Series(zip(C[i], D[i])) for i in C.columns], axis=1)
self.cd.columns = C.columns;
self.cd.index = C.index
self.cdi = self.cd.applymap(lambda x: index.loc[x])
def sampleDepths(self,depths = [30, 100, 300]):
self.D = pd.Series(None, index=depths)
self.C = pd.Series(None, index=depths)
for depthRate in depths:
self.D.loc[depthRate] = np.random.poisson(depthRate,
self.X.shape[0] * self.X.shape[1] * self.X.shape[2]).reshape(
self.X.shape).astype(object)
self.C.loc[depthRate] = np.array([np.random.binomial(d, x) for x, d in
zip(self.X.reshape(-1), self.D.loc[depthRate].reshape(-1))]).reshape(
self.X.shape).astype(object)
@staticmethod
def sampleDepthX(X,cov):
D= np.random.poisson(cov,X.size)
C= np.array([np.random.binomial(d, x) for x, d in zip(X, D)])
return C,D
@staticmethod
def sampleDepthXSeries(X,cov):
C,D=Simulation.sampleDepthX(X.values,cov)
a=pd.DataFrame([C,D],columns=X.index,index=['C','D']).T
return a
@staticmethod
def computeCDdf(a, E):
index = pd.Series(range(E.shape[0]), E.index)
def f(x):
try:
return index.loc[x]
except:
return -1
z=a.groupby(level=[0,1],axis=1).apply(lambda x: x.apply(lambda y:(y.iloc[0],y.iloc[1]),1)).applymap(f)
return z[(z<0).sum(1)==0]
def getCD(self,coverage):
T=self.getTrueGenerationTimes()
Ti=T
if T[-1]!=self.C[coverage].shape[0]-1: Ti=range(self.C[coverage].shape[0])
C=pd.concat([pd.DataFrame(self.C[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
D=pd.concat([pd.DataFrame(self.D[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
CD=pd.concat([C,D],1,keys=['C','D']).reorder_levels([1,2,0],1).sort_index(1)
CD.columns.names=['REP','GEN','READ']
return CD
@staticmethod
def Recombinator(rate, loci):
"""
Recombination at loci, after variant index. Loci can take value in [0, NumSNPs-1]
Args:
rate: recombination rate
loci: index of the loci in which rec is is being performed
Returns: recombinator which is an argument of Simulation, _simulation2 and evolve. It can be list of loci
"""
if not isinstance(loci, list):
loci = [loci]
return sim.Recombinator(intensity=rate, loci=loci)
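    # Usage sketch (illustrative): restrict recombination to two loci and pass the
    # recombinator into the forward simulation.
    #
    #   rec = Simulation.Recombinator(rate=2e-8, loci=[10, 20])
    #   sim_run = Simulation(..., recombinator=rec)   # '...' stands for the usual arguments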
class POP:
@staticmethod
def createISOGenicDiploidPopulation(df):
"""
        initHaps : np 2D array of shape m x nSS, where m is the number of individual haplotypes and nSS is the number of segregating sites
        returns a homozygous diploid population in which every haplotype is copied n times
"""
pop = sim.Population(size=df.shape[0], ploidy=2, loci=df.shape[1], lociPos=list(df.columns),
infoFields='fitness')
for (i, (_, h)) in zip(pop.individuals(), df.iterrows()):
i.setGenotype(h.tolist(), 0);
i.setGenotype(h.tolist(), 1)
return pop
@staticmethod
def toDF(pop):
x = pd.concat(map(pd.DataFrame, [map(list, [i.genotype(0), i.genotype(1)]) for i in pop.allIndividuals()]),
keys=range(pop.popSize()))
x.columns = list(pop.lociPos())
return x
@staticmethod
def freq(pop):
sim.stat(pop, alleleFreq=range(pop.numLoci()[0]), vars=['alleleFreq'])
return pd.Series(pd.DataFrame(pop.vars()['alleleFreq']).loc[1].reindex().values,map(int,pop.lociPos())).fillna(0)
@staticmethod
def Haplotypes(pop,counts=False,unique=True):
if isinstance(pop,sim.Population):
a=POP.toDF(pop)
else:
a=pop
H=a.reset_index(drop=True)
H.columns=map(int,H.columns)
b=H.loc[H.sum(1).sort_values().index].astype(str).apply(lambda x: ''.join(x), 1).reset_index(drop=True)
if counts:
return b.value_counts().sort_index()
else:
if unique:
b=b.drop_duplicates()
return b.loc[b.sort_values().index].reset_index(drop=True)
@staticmethod
def establish(H, ba, k=5):
N = H.shape[0]
car = H[H[ba] == 1]
n = car.shape[0]
return pd.concat([car.iloc[np.random.choice(n, k)], H.iloc[np.random.choice(N, N - k)]]).reset_index(drop=True)
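    # Usage sketch (illustrative) for a simuPOP population created elsewhere in
    # this module:
    #
    #   freqs = POP.freq(pop)                     # pd.Series of allele frequencies by position
    #   haps  = POP.Haplotypes(pop, counts=True)  # haplotype strings mapped to their counts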
class Drift:
@staticmethod
def nextGeneration(N,x):
return (np.random.random(N)<=x).mean()
@staticmethod
def sampleReads(D,x):
return [Drift.sampleReadsDerived(D,x),D]
@staticmethod
def sampleReadsDerived(D,x):
return (np.random.random(D)<=x).sum()
@staticmethod
def simulateAF(N,x,T):
Xt=[]
for i in range(1, T[-1]+1):
x=Drift.nextGeneration(N,x)
if i in T:Xt.append(x)
return Xt
@staticmethod
def simulatePoolCD(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
y=Drift.nextGeneration(n,x)
Xt.append(Drift.sampleReads(D[i], y))
return pd.DataFrame([[cd[0].C,cd[0].D]]+Xt,index=D.index,columns=['C','D'])
@staticmethod
def simulatePoolDerivd(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
Xt+=[Drift.sampleReadsDerived(D[i], Drift.nextGeneration(n,x))]
return [cd[0].C]+Xt
@staticmethod
def simulatePools(N,cd,M):
return pd.concat([Drift.simulatePool(N,cd) for _ in range(M)],keys=range(M))
@staticmethod
def simulateAFs(N,x,T,M):
return pd.DataFrame([Drift.simulateAF(N,x,T) for _ in range(M)],columns=T)
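    # Usage sketch (illustrative values): 100 replicate drift trajectories of a
    # neutral allele starting at frequency 0.1 in a population of N=1000, sampled
    # at generations 10, 20 and 50.
    #
    #   afs = Drift.simulateAFs(N=1000, x=0.1, T=[10, 20, 50], M=100)
    #   afs.mean()   # stays near 0.1 on average, while the variance grows with time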
| mit | 8,298,902,231,767,576,000 | 46.236597 | 304 | 0.578031 | false |
turdusmerula/kipartman | test/TESTpluginImportCSV.py | 1 | 1083 | import sys, os
# TODO: look up the current directory.
# In IPython, sys.argv[0] does not give <full directory>\\<this filename>,
# so fall back to os.getcwd().
# For this TEST, just add both possible paths for the necessary imports.
#
#
sys.path.append(
os.path.join(os.path.split(os.path.dirname(sys.argv[0]))[0],'kipartman'))
sys.path.append(os.path.join(os.getcwd(),'kipartman'))
print(sys.path)
from plugins import plugin_loader
from plugins import import_plugins as import_plugins
# RETRIEVE the find_parts
import rest
'''
Gets a file path via popup, then imports content
'''
importers = plugin_loader.load_import_plugins()
wildcards = '|'.join([x.wildcard for x in importers])
wildcards
importers[0]
importpath=os.path.join(os.getcwd(),'','17W50TESTimportCSV.csv')
importpath
base, ext = os.path.splitext(importpath)
thecategory = eval(u"{'childs': None,\n 'description': '',\n 'id': 4,\n 'name': 'Test',\n 'parent': {'id': 1},\n 'path': '/Resistor/Test'}")
# 1: For sqldb 0: for CsvImport
importItems = importers[0]().fetch(base, thecategory, rest.model)
pass
| gpl-3.0 | 13,814,710,417,899,064 | 22.06383 | 140 | 0.713758 | false |
setsulla/owanimo | script/battle_arena.py | 1 | 1283 | import os
import sys
import time
from owanimo.app.error import ERROR as e
from owanimo.script import allegory_special
from owanimo.util import define
from owanimo.util.log import LOG as L
class Allegory(allegory_special.Allegory):
def __init__(self, runner, profile, player):
allegory_special.Allegory.__init__(self, runner, profile, player)
time.sleep(2)
def before(self):
L.info("*** Start Allegory : %s *** " % __file__)
self.start()
def test(self):
# Step 1 : Login
result = self.check(True, self.login(), e.LOGIN)
self.flush(self.step())
if not result: return
# Step 2 : Select Normal Battle
result = self.check(True, self.battle_arena("special/battle_30BP.png"), e.START_BATTLE)
self.flush(self.step())
if not result: return
# Step 3 : Normal Battle
result = self.check(True, self.battle_puyo(), e.PUYO_BATTLE)
self.flush(self.step())
if not result: return
# Step 4 : Battle Result
result = self.check(True, self.battle_arena_result(), e.CHECK_BATTLE)
self.flush(self.step())
if not result: return
def after(self):
L.info("*** End Allegory : %s *** " % __file__)
self.stop()
| mit | -2,368,284,761,337,164,300 | 29.547619 | 95 | 0.605612 | false |
mallconnectionorg/openerp | logistica/stock_posiciones_por_ubicacion/__openerp__.py | 1 | 1871 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'PosicionesPorUbicacion',
'version': '1.0',
'category': 'Warehouse',
'description': """
    This module implements the ability to manage multiple positions per location. For example,
    a location can be split into different positions across racks and shelves. It can also set up
    a replenishment flow between positions.
""",
'author': 'Cesar Lopez Aguillon',
'depends': ['stock','point_of_sale'],
'data': [
'stock_posiciones_por_ubicacion.xml',
'stock_posiciones_por_ubicacion_data.xml',
'wizard/stock_posiciones_por_ubicacion_wizard.xml',
'security/ir.model.access.csv',
'security/stock_posiciones_por_ubicacion_security.xml',
],
'demo': [],
'installable': True,
'test': [],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 156,399,566,055,900,960 | 38.787234 | 102 | 0.632086 | false |
ericholscher/django | django/views/i18n.py | 1 | 10581 | import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Template(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
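# Usage sketch (not part of this module): projects of this Django era typically
# exposed the catalog through their URLconf, e.g. (the package name 'myapp' is
# only an example; adapt the URL declaration style to the Django version in use):
#
#   js_info_dict = {'packages': ('myapp',)}
#   # urls.py:  url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict)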
| bsd-3-clause | 7,840,672,329,311,219,000 | 33.691803 | 124 | 0.617049 | false |
Alir3z4/django-databrowse | django_databrowse/datastructures.py | 1 | 11642 | """
These classes are light wrappers around Django's database API that provide
convenience functionality and permalink functions for the databrowse app.
"""
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_text, iri_to_uri
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import python_2_unicode_compatible
EMPTY_VALUE = '(None)'
DISPLAY_SIZE = 100
class EasyModel(object):
def __init__(self, site, model):
self.site = site
self.model = model
self.model_list = site.registry.keys()
self.verbose_name = model._meta.verbose_name
self.verbose_name_plural = model._meta.verbose_name_plural
def __repr__(self):
return '<EasyModel for %s>' % \
smart_text(self.model._meta.object_name)
def model_databrowse(self):
"Returns the ModelDatabrowse class for this model."
return self.site.registry[self.model]
def url(self):
return mark_safe('%s%s/%s/' % (self.site.root_url,
self.model._meta.app_label,
self.model._meta.model_name))
def objects(self, **kwargs):
return self.get_query_set().filter(**kwargs)
def get_query_set(self):
qs = self.model._default_manager.get_queryset()
easy_qs = EasyQuerySet(model=qs.model, query=qs.query.clone(),
using=qs._db, hints=qs._hints)
easy_qs._easymodel = self
return easy_qs
def object_by_pk(self, pk):
return EasyInstance(self, self.model._default_manager.get(pk=pk))
def sample_objects(self):
for obj in self.model._default_manager.all()[:3]:
yield EasyInstance(self, obj)
def field(self, name):
try:
f = self.model._meta.get_field(name)
except models.FieldDoesNotExist:
return None
return EasyField(self, f)
def fields(self):
return [EasyField(self, f) for f in (self.model._meta.fields +
self.model._meta.many_to_many)]
class EasyField(object):
def __init__(self, easy_model, field):
self.model, self.field = easy_model, field
def __repr__(self):
return smart_text(u'<EasyField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def choices(self):
for value, label in self.field.choices:
yield EasyChoice(self.model, self, value, label)
def url(self):
if self.field.choices:
return mark_safe('%s%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name))
elif self.field.rel:
return mark_safe('%s%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name))
class EasyChoice(object):
def __init__(self, easy_model, field, value, label):
self.model, self.field = easy_model, field
self.value, self.label = value, label
def __repr__(self):
return smart_text(u'<EasyChoice for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def url(self):
return mark_safe('%s%s/%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.field.name,
iri_to_uri(self.value)))
@python_2_unicode_compatible
class EasyInstance(object):
def __init__(self, easy_model, instance):
self.model, self.instance = easy_model, instance
def __repr__(self):
return smart_text(u'<EasyInstance for %s (%s)>' %
(self.model.model._meta.object_name,
self.instance._get_pk_val()))
def __str__(self):
val = smart_text(self.instance)
if len(val) > DISPLAY_SIZE:
return val[:DISPLAY_SIZE] + u'...'
return val
def pk(self):
return self.instance._get_pk_val()
def url(self):
return mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
iri_to_uri(self.pk())))
def fields(self):
"""
Generator that yields EasyInstanceFields for each field in this
EasyInstance's model.
"""
for f in self.model.model._meta.fields +\
self.model.model._meta.many_to_many:
yield EasyInstanceField(self.model, self, f)
def related_objects(self):
"""
Generator that yields dictionaries of all models that have this
EasyInstance's model as a ForeignKey or ManyToManyField, along with
lists of related objects.
"""
related_objects = [
f for f in self.model.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
]
related_m2m = [
f for f in self.model.model._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
for rel_object in related_objects + related_m2m:
if rel_object.model not in self.model.model_list:
continue # Skip models that aren't in the model_list
em = EasyModel(self.model.site, rel_object.related_model)
try:
rel_accessor = getattr(self.instance, rel_object.get_accessor_name())
except ObjectDoesNotExist:
continue
if rel_object.field.rel.multiple:
object_list = [EasyInstance(em, i) for i in rel_accessor.all()]
else: # for one-to-one fields
object_list = [EasyInstance(em, rel_accessor)]
yield {
'model': em,
'related_field': rel_object.field.verbose_name,
'object_list': object_list,
}
class EasyInstanceField(object):
def __init__(self, easy_model, instance, field):
self.model, self.field, self.instance = easy_model, field, instance
self.raw_value = getattr(instance.instance, field.name)
def __repr__(self):
return smart_text(u'<EasyInstanceField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def values(self):
"""
Returns a list of values for this field for this instance. It's a list
        so we can accommodate many-to-many fields.
"""
# This import is deliberately inside the function because it causes
# some settings to be imported, and we don't want to do that at the
# module level.
if self.field.rel:
if isinstance(self.field.rel, models.ManyToOneRel):
objs = getattr(self.instance.instance, self.field.name)
elif isinstance(self.field.rel,
models.ManyToManyRel): # ManyToManyRel
return list(getattr(self.instance.instance,
self.field.name).all())
elif self.field.choices:
objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
elif isinstance(self.field, models.DateField) or \
isinstance(self.field, models.TimeField):
if self.raw_value:
if isinstance(self.field, models.DateTimeField):
objs = capfirst(formats.date_format(self.raw_value,
'DATETIME_FORMAT'))
elif isinstance(self.field, models.TimeField):
objs = capfirst(formats.time_format(self.raw_value,
'TIME_FORMAT'))
else:
objs = capfirst(formats.date_format(self.raw_value,
'DATE_FORMAT'))
else:
objs = EMPTY_VALUE
elif isinstance(self.field, models.BooleanField) or \
isinstance(self.field, models.NullBooleanField):
objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
else:
objs = self.raw_value
return [objs]
def urls(self):
"Returns a list of (value, URL) tuples."
# First, check the urls() method for each plugin.
plugin_urls = []
for plugin_name, plugin in \
self.model.model_databrowse().plugins.items():
urls = plugin.urls(plugin_name, self)
if urls is not None:
#plugin_urls.append(urls)
values = self.values()
return zip(self.values(), urls)
if self.field.rel:
m = EasyModel(self.model.site, self.field.rel.to)
if self.field.rel.to in self.model.model_list:
lst = []
for value in self.values():
if value is None:
continue
url = mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
m.model._meta.app_label,
m.model._meta.model_name,
iri_to_uri(value._get_pk_val())))
lst.append((smart_text(value), url))
else:
lst = [(value, None) for value in self.values()]
elif self.field.choices:
lst = []
for value in self.values():
url = mark_safe('%s%s/%s/fields/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name,
iri_to_uri(self.raw_value)))
lst.append((value, url))
elif isinstance(self.field, models.URLField):
val = self.values()[0]
lst = [(val, iri_to_uri(val))]
else:
lst = [(self.values()[0], None)]
return lst
class EasyQuerySet(QuerySet):
"""
When creating (or cloning to) an `EasyQuerySet`, make sure to set the
`_easymodel` variable to the related `EasyModel`.
"""
def iterator(self, *args, **kwargs):
for obj in super(EasyQuerySet, self).iterator(*args, **kwargs):
yield EasyInstance(self._easymodel, obj)
def _clone(self, *args, **kwargs):
c = super(EasyQuerySet, self)._clone(*args, **kwargs)
c._easymodel = self._easymodel
return c
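# An illustrative sketch of how these wrappers fit together; `site` and `model` stand for
# a databrowse site instance and a model registered with it.
def example_easymodel_usage(site, model):
    easy = EasyModel(site, model)
    lines = []
    for instance in easy.sample_objects():
        for field in instance.fields():
            # values() always returns a list, so many-to-many fields look the same
            # as plain fields to calling code.
            lines.append('%s: %r' % (field.field.name, field.values()))
    return easy.url(), lines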
| bsd-3-clause | 8,765,281,568,235,199,000 | 39.423611 | 85 | 0.519155 | false |
SelvorWhim/competitive | Codewars/LinkedListsAlternatingSplit.py | 1 | 1195 | # this solution preserves original list structure, but new nodes shallow copy old data, so if the data is a reference type, changing it in one list will affect one of the others
class Node(object):
def __init__(self, data=None):
self.data = data
self.next = None
# shallow copy of the data, no copy of next
def clone(self):
return Node(self.data)
class Context(object):
def __init__(self, first, second):
self.first = first
self.second = second
def alternating_split(head):
if head == None or head.next == None: # fewer than 2 Nodes in the list
#return Context(head, None) # that made sense to me but examples say raise an error
raise ValueError()
ret = Context(head.clone(), head.next.clone())
main_it = head.next.next
ret_its = [ret.first, ret.second]
i = 2 # or 0, or work with booleans, all I need here is parity. But this way, solution is easily generalized to alternating split between 3 or more lists
while main_it != None:
ret_its[i % 2].next = main_it.clone()
ret_its[i % 2] = ret_its[i % 2].next
main_it = main_it.next
i += 1
return ret
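# A quick illustrative check: splitting 1 -> 2 -> 3 -> 4 gives 1 -> 3 and 2 -> 4.
def example_alternating_split():
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    ctx = alternating_split(head)
    firsts, node = [], ctx.first
    while node is not None:
        firsts.append(node.data)
        node = node.next
    return firsts  # [1, 3]; ctx.second holds 2 -> 4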
| unlicense | 2,663,904,410,082,395,600 | 38.833333 | 177 | 0.632636 | false |
OpusVL/odoo | openerp/cli/scaffold.py | 1 | 4096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import re
import sys
import jinja2
from . import Command
from openerp.modules.module import (get_module_root, MANIFEST, load_information_from_description_file as load_manifest)
class Scaffold(Command):
""" Generates an Odoo module skeleton. """
def run(self, cmdargs):
# TODO: bash completion file
parser = argparse.ArgumentParser(
prog="%s scaffold" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__,
epilog=self.epilog(),
)
parser.add_argument(
'-t', '--template', type=template, default=template('default'),
help="Use a custom module template, can be a template name or the"
" path to a module template (default: %(default)s)")
parser.add_argument('name', help="Name of the module to create")
parser.add_argument(
'dest', default='.', nargs='?',
help="Directory to create the module in (default: %(default)s)")
if not cmdargs:
sys.exit(parser.print_help())
args = parser.parse_args(args=cmdargs)
args.template.render_to(
snake(args.name),
directory(args.dest, create=True),
{'name': args.name})
def epilog(self):
return "Built-in templates available are: %s" % ', '.join(
d for d in os.listdir(builtins())
if d != 'base'
)
builtins = lambda *args: os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'templates',
*args)
def snake(s):
""" snake cases ``s``
:param str s:
:return: str
"""
# insert a space before each uppercase character preceded by a
# non-uppercase letter
s = re.sub(r'(?<=[^A-Z])\B([A-Z])', r' \1', s)
# lowercase everything, split on whitespace and join
return '_'.join(s.lower().split())
def pascal(s):
return ''.join(
ss.capitalize()
for ss in re.sub('[_\s]+', ' ', s).split()
)
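# An illustrative sanity check for the two helpers above (a sketch, not used by the
# command itself): snake() inserts underscores at case changes, pascal() reverses it.
def _example_name_filters():
    assert snake('MyModuleName') == 'my_module_name'
    assert pascal('my_module_name') == 'MyModuleName'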
def directory(p, create=False):
expanded = os.path.abspath(
os.path.expanduser(
os.path.expandvars(p)))
if create and not os.path.exists(expanded):
os.makedirs(expanded)
if not os.path.isdir(expanded):
die("%s is not a directory" % p)
return expanded
env = jinja2.Environment()
env.filters['snake'] = snake
env.filters['pascal'] = pascal
class template(object):
def __init__(self, identifier):
# TODO: directories, archives (zipfile, tarfile)
self.id = identifier
if not os.path.isdir(self.path):
die("{} is not a valid module template".format(identifier))
def __str__(self):
return self.id
@property
def path(self):
return builtins(self.id)
def files(self):
""" Lists the (local) path and content of all files in the template
"""
for root, _, files in os.walk(self.path):
for f in files:
path = os.path.join(root, f)
yield path, open(path, 'rb').read()
def render_to(self, modname, directory, params=None):
""" Render this module template to ``dest`` with the provided
rendering parameters
"""
# overwrite with local
for path, content in self.files():
_, ext = os.path.splitext(path)
local = os.path.relpath(path, self.path)
dest = os.path.join(directory, modname, local)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
with open(dest, 'wb') as f:
if ext not in ('.py', '.xml', '.csv', '.js'):
f.write(content)
else:
env.from_string(content)\
.stream(params or {})\
.dump(f, encoding='utf-8')
def die(message, code=1):
print >>sys.stderr, message
sys.exit(code)
def warn(message):
# ASK: shall we use logger ?
print "WARNING: " + message
| agpl-3.0 | 5,110,078,230,369,223,000 | 29.567164 | 119 | 0.561768 | false |
tensorflow/lingvo | lingvo/tasks/asr/tools/simple_wer.py | 1 | 9729 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stand-alone script to evalute the word error rate (WER) for ASR tasks.
THIS SCRIPT IS NO LONGER SUPPORTED. PLEASE USE simple_wer_v2.py INSTEAD.
Tensorflow and Lingvo are not required to run this script.
Example of Usage::
python simple_wer.py file_hypothesis file_reference
python simple_wer.py file_hypothesis file_reference diagnosis_html
where `file_hypothesis` is the file name for hypothesis text and
`file_reference` is the file name for reference text.
`diagnosis_html` (optional) is the html filename to diagnose the errors.
Or you can use this file as a library, and call either of the following:
- ``ComputeWER(hyp, ref)`` compute WER for one pair of hypothesis/reference
- ``AverageWERs(hyps, refs)`` average WER for a list of hypotheses/references
Note to evaluate the ASR, we consider the following pre-processing:
- change transcripts to lower-case
- remove punctuation: ``" , . ! ? ( ) [ ]``
- remove extra empty spaces
"""
import re
import sys
def ComputeEditDistanceMatrix(hs, rs):
"""Compute edit distance between two list of strings.
Args:
hs: the list of words in the hypothesis sentence
rs: the list of words in the reference sentence
Returns:
Edit distance matrix (in the format of list of lists), where the first
index is the reference and the second index is the hypothesis.
"""
dr, dh = len(rs) + 1, len(hs) + 1
dists = [[]] * dr
# Initialization.
for i in range(dr):
dists[i] = [0] * dh
for j in range(dh):
if i == 0:
dists[0][j] = j
elif j == 0:
dists[i][0] = i
# Do dynamic programming.
for i in range(1, dr):
for j in range(1, dh):
if rs[i - 1] == hs[j - 1]:
dists[i][j] = dists[i - 1][j - 1]
else:
tmp0 = dists[i - 1][j - 1] + 1
tmp1 = dists[i][j - 1] + 1
tmp2 = dists[i - 1][j] + 1
dists[i][j] = min(tmp0, tmp1, tmp2)
return dists
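# A tiny worked example (illustrative sketch) of the matrix produced above.
def _ExampleEditDistanceMatrix():
  dists = ComputeEditDistanceMatrix(['a', 'b'], ['a', 'c'])
  # dists == [[0, 1, 2],
  #           [1, 0, 1],
  #           [2, 1, 1]]
  # The total edit distance is the bottom-right entry: one substitution (b -> c).
  return dists[-1][-1]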
def PreprocessTxtBeforeWER(txt):
"""Preprocess text before WER caculation."""
# Lowercase, remove \t and new line.
txt = re.sub(r'[\t\n]', ' ', txt.lower())
# Remove punctuation before space.
txt = re.sub(r'[,.\?!]+ ', ' ', txt)
# Remove punctuation before end.
txt = re.sub(r'[,.\?!]+$', ' ', txt)
# Remove punctuation after space.
txt = re.sub(r' [,.\?!]+', ' ', txt)
# Remove quotes, [, ], ( and ).
txt = re.sub(r'["\(\)\[\]]', '', txt)
# Remove extra space.
txt = re.sub(' +', ' ', txt.strip())
return txt
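# An illustrative call showing the normalization above: lower-casing, punctuation
# stripping and whitespace collapsing.
def _ExamplePreprocess():
  return PreprocessTxtBeforeWER('Hello, World!  (test)')  # -> 'hello world test'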
def _GenerateAlignedHtml(hyp, ref, err_type):
"""Generate a html element to highlight the difference between hyp and ref.
Args:
hyp: Hypothesis string.
ref: Reference string.
err_type: one of 'none', 'sub', 'del', 'ins'.
Returns:
a html string where disagreements are highlighted.
- hyp highlighted in green, and marked with <del> </del>
- ref highlighted in yellow
"""
highlighted_html = ''
if err_type == 'none':
highlighted_html += '%s ' % hyp
elif err_type == 'sub':
highlighted_html += """<span style="background-color: greenyellow">
<del>%s</del></span><span style="background-color: yellow">
%s </span> """ % (hyp, ref)
elif err_type == 'del':
highlighted_html += """<span style="background-color: yellow">
%s</span> """ % (
ref)
elif err_type == 'ins':
highlighted_html += """<span style="background-color: greenyellow">
<del>%s</del> </span> """ % (
hyp)
else:
raise ValueError('unknown err_type ' + err_type)
return highlighted_html
def GenerateSummaryFromErrs(nref, errs):
"""Generate strings to summarize word errors.
Args:
nref: integer of total words in references
errs: dict of three types of errors. e.g. {'sub':10, 'ins': 15, 'del': 3}
Returns:
Two strings:
- string summarizing total error, total word, WER,
- string breaking down three errors: deleting, insertion, substitute
"""
total_error = sum(errs.values())
str_sum = 'total error = %d, total word = %d, wer = %.2f%%' % (
total_error, nref, total_error * 100.0 / nref)
str_details = 'Error breakdown: del = %.2f%%, ins=%.2f%%, sub=%.2f%%' % (
errs['del'] * 100.0 / nref, errs['ins'] * 100.0 / nref,
errs['sub'] * 100.0 / nref)
return str_sum, str_details
def ComputeWER(hyp, ref, diagnosis=False):
"""Computes WER for ASR by ignoring diff of punctuation, space, captions.
Args:
hyp: Hypothesis string.
ref: Reference string.
diagnosis (optional): whether to generate diagnosis str (in html format)
Returns:
A tuple of 3 elements:
- dict of three types of errors. e.g. ``{'sub':0, 'ins': 0, 'del': 0}``
- num of reference words, integer
- aligned html string for diagnois (empty if diagnosis = False)
"""
hyp = PreprocessTxtBeforeWER(hyp)
ref = PreprocessTxtBeforeWER(ref)
# Compute edit distance.
hs = hyp.split()
rs = ref.split()
distmat = ComputeEditDistanceMatrix(hs, rs)
# Back trace, to distinguish different errors: insert, deletion, substitution.
ih, ir = len(hs), len(rs)
errs = {'sub': 0, 'ins': 0, 'del': 0}
aligned_html = ''
while ih > 0 or ir > 0:
err_type = ''
# Distinguish error type by back tracking
if ir == 0:
err_type = 'ins'
elif ih == 0:
err_type = 'del'
else:
if hs[ih - 1] == rs[ir - 1]: # correct
err_type = 'none'
elif distmat[ir][ih] == distmat[ir - 1][ih - 1] + 1: # substitute
err_type = 'sub'
elif distmat[ir][ih] == distmat[ir - 1][ih] + 1: # deletion
err_type = 'del'
elif distmat[ir][ih] == distmat[ir][ih - 1] + 1: # insert
err_type = 'ins'
else:
raise ValueError('fail to parse edit distance matrix')
# Generate aligned_html
if diagnosis:
if ih == 0 or not hs:
tmph = ' '
else:
tmph = hs[ih - 1]
if ir == 0 or not rs:
tmpr = ' '
else:
tmpr = rs[ir - 1]
aligned_html = _GenerateAlignedHtml(tmph, tmpr, err_type) + aligned_html
# If no error, go to previous ref and hyp.
if err_type == 'none':
ih, ir = ih - 1, ir - 1
continue
# Update error.
errs[err_type] += 1
# Adjust position of ref and hyp.
if err_type == 'del':
ir = ir - 1
elif err_type == 'ins':
ih = ih - 1
else: # err_type == 'sub'
ih, ir = ih - 1, ir - 1
assert distmat[-1][-1] == sum(errs.values())
# Num of words. For empty ref we set num = 1.
nref = max(len(rs), 1)
return errs, nref, aligned_html
def AverageWERs(hyps, refs, verbose=True, diagnosis=False):
"""Computes average WER from a list of references/hypotheses.
Args:
hyps: list of hypothesis strings.
refs: list of reference strings.
verbose: optional (default True)
diagnosis (optional): whether to generate list of diagnosis html
Returns:
A tuple of 3 elements:
- dict of three types of errors. e.g. ``{'sub':0, 'ins': 0, 'del': 0}``
- num of reference words, integer
- list of aligned html string for diagnosis (empty if diagnosis = False)
"""
totalw = 0
total_errs = {'sub': 0, 'ins': 0, 'del': 0}
aligned_html_list = []
for hyp, ref in zip(hyps, refs):
errs_i, nref_i, diag_str = ComputeWER(hyp, ref, diagnosis)
if diagnosis:
aligned_html_list += [diag_str]
totalw += nref_i
total_errs['sub'] += errs_i['sub']
total_errs['ins'] += errs_i['ins']
total_errs['del'] += errs_i['del']
if verbose:
str_summary, str_details = GenerateSummaryFromErrs(totalw, total_errs)
print(str_summary)
print(str_details)
return total_errs, totalw, aligned_html_list
def main(argv):
hyp = open(argv[1], 'r').read()
ref = open(argv[2], 'r').read()
if len(argv) == 4:
diagnosis = True
fn_output = argv[3]
else:
diagnosis = False
fn_output = None
errs, nref, aligned_html = ComputeWER(hyp, ref, diagnosis)
str_summary, str_details = GenerateSummaryFromErrs(nref, errs)
print(str_summary)
print(str_details)
if fn_output:
with open(fn_output, 'wt') as fp:
fp.write('<body><html>')
fp.write('<div>%s</div>' % aligned_html)
fp.write('</body></html>')
if __name__ == '__main__':
print('THIS SCRIPT IS NO LONGER SUPPORTED.'
'PLEASE USE simple_wer_v2.py INSTEAD.')
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("""
Example of Usage:
python simple_wer.py file_hypothesis file_reference
or
python simple_wer.py file_hypothesis file_reference diagnosis_html
where file_hypothesis is the file name for hypothesis text
file_reference is the file name for reference text.
diagnosis_html (optional) is the html filename to diagnose the errors.
Or you can use this file as a library, and call either of the following
- ComputeWER(hyp, ref) to compute WER for one pair of hypothesis/reference
- AverageWERs(hyps, refs) to average WER for a list of hypotheses/references
""")
sys.exit(1)
main(sys.argv)
| apache-2.0 | -7,434,293,313,371,145,000 | 27.614706 | 80 | 0.617638 | false |
ActiveState/code | recipes/Python/576696_OrderedSet_with_Weakrefs/recipe-576696.py | 1 | 2863 | import collections
from weakref import proxy
class Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedSet(collections.MutableSet):
    'Set that remembers the order elements were added'
# Big-O running times for all methods are the same as for regular sets.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The prev/next links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedSet.
def __init__(self, iterable=None):
self.__root = root = Link() # sentinel node for doubly linked list
root.prev = root.next = root
self.__map = {} # key --> link
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.__map)
def __contains__(self, key):
return key in self.__map
def add(self, key):
# Store new key in a new link at the end of the linked list
if key not in self.__map:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = root.prev = proxy(link)
def discard(self, key):
# Remove an existing item using self.__map to find the link which is
# then removed by updating the links in the predecessor and successors.
if key in self.__map:
link = self.__map.pop(key)
link.prev.next = link.next
link.next.prev = link.prev
def __iter__(self):
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return not self.isdisjoint(other)
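# A short usage sketch of the recipe above (illustrative only):
def _example_ordered_set():
    s = OrderedSet('abracadabra')      # first-seen order is kept: a, b, r, c, d
    s.add('z')
    s.discard('r')
    return list(s), s.pop(last=False)  # (['a', 'b', 'c', 'd', 'z'], 'a')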
| mit | -5,884,483,915,631,324,000 | 35.705128 | 87 | 0.575969 | false |
nvbn/django-discover-jenkins | tests/tests/test_runner.py | 1 | 2207 | from mock import MagicMock, patch
from django.test import TestCase
from discover_jenkins import runner, tasks
class FakeTestRunner(object):
"""
A fake object to stub out the base methods that the mixin's super() calls
require.
"""
def setup_test_environment(self):
pass
def teardown_test_environment(self):
pass
class Runner(runner.CIRunner, FakeTestRunner):
"""CIRunner is a mixin, so use the FakeTestRunner as a base"""
pass
class TestCIRunner(TestCase):
def test_get_tasks(self):
"""
Make sure the correct tasks are imported based on the
test_project.settings.
"""
self.assertEqual(runner.get_tasks(),
[tasks.with_coverage.CoverageTask,
tasks.run_pylint.PyLintTask,
tasks.run_jshint.JSHintTask,
tasks.run_sloccount.SlocCountTask])
def test_get_task_options(self):
"""
For now, just do a simple test to make sure the right number of options
are gleaned from the tasks.
"""
self.assertEqual(len(runner.get_task_options()), 14)
def test_setup_test_environment(self):
"""
Make sure the setup_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.setup_test_environment()
self.assertTrue(mock_task.setup_test_environment.called)
def test_teardown_test_environment(self):
"""
        Make sure the teardown_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.teardown_test_environment()
self.assertTrue(mock_task.teardown_test_environment.called)
| bsd-3-clause | 4,994,888,553,036,383,000 | 28.426667 | 79 | 0.593113 | false |
Tocknicsu/nctuoj_contest | test/api/submission/submission.py | 1 | 3212 | def set_contest():
import datetime
start = datetime.datetime.now() + datetime.timedelta(hours=-1)
end = datetime.datetime.now() + datetime.timedelta(hours=1)
start = str(start)[:-7]
end = str(end)[:-7]
return [{
"name": "adjust to now is in contest",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"title": "change",
"start": start,
"end": end,
"freeze": 0,
"description": "XD"
},
"response_status": 200,
"response_data": {
"msg": {
"title": "change",
"start": start,
"end": end,
"freeze": 0,
"description": "XD"
}
}
},]
def problem_data():
data = []
for i in range(1, 5):
data += [{
"name": "post_problem_%s"%(chr(ord('A')+i-1)),
"url": "/api/problems/",
"method": "post",
"payload": {
"token": "ADMIN@TOKEN",
"title": "problem A",
"score_type": 0
},
"files": {
"pdf": "./api/problem/problem.pdf"
},
"ignore": ["msg"],
"response_status": 200,
"response_data": {}
},
{
"name": "put_problem_execute",
"url": "/api/problems/%s/executes/"%(i),
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"executes[]": [1, 2, 3, 4]
},
"ignore": ["msg"],
"response_status": 200,
"response_data": {}
}]
return data
def post_submission():
data = []
role_token = ["admin", "test", "unofficial", "official"]
for x in role_token:
for i in range(1, 5):
data += [{
"name": "post_submission_%s_%s"%(x, chr(ord('A')+i-1)),
"url": "/api/submissions/",
"method": "post",
"payload": {
"problem_id": i,
"execute_type_id": 1,
"file_name": "test.c",
"code": "#include <stdio.h>\n int main(){ printf(\"Hello World\"); }",
"token": "%s@TOKEN"%(x.upper()),
},
"ignore": ["msg"],
"response_status": 200,
"response_data":{}
}]
return data
def query_submissions():
data = []
role_token = ["admin", "test", "unofficial", "official"]
for x in role_token:
data += [
{
"name": "get_submission_%s"%(x),
"url": "/api/submissions/",
"method": "get",
"payload":{
"count": 10,
"page": 1,
"token": "%s@TOKEN"%(x.upper()),
},
"response_status": 200,
"response_data": {
}
},
]
return data
data = []
data += problem_data()
data += post_submission()
data += query_submissions()
| apache-2.0 | -1,998,208,727,368,725,200 | 27.936937 | 90 | 0.382005 | false |
will-hart/blitz | blitz/data/__init__.py | 1 | 8432 | import sys
__author__ = 'Will Hart'
from collections import OrderedDict
class DataContainer(object):
"""
A class for saving and managing data that can be used in the interface. It
also provides an interface for adding DataTransform objects which can be used
to apply filters (i.e. moving average, multiplication, etc) to the data
    :param persistent: Indicates whether all data is kept (True) or only the most recent MAX_VALUES (50) readings per series (False, default)
"""
MAX_VALUES = 50
def __init__(self, persistent=False):
self.__series = OrderedDict()
self.__series_names = {}
self.number_of_series = 0
self.x = []
self.y = []
self.__transforms = []
self.x_transformed = []
self.y_transformed = []
self.__persistent = persistent
def clear_data(self):
"""
Clears all data from the data DataContainer
:returns: Nothing
"""
self.__series = OrderedDict()
self.x = []
self.y = []
self.__series_names = {}
self.number_of_series = 0
self.__transforms = []
self.x_transformed = []
self.y_transformed = []
def push(self, series_id, series_name, x, y):
"""
Adds the passed X and Y values to the given series. If the series has not been
registered with the DataContainer it registers it
:param series_id: The ID of the series
:param series_name: The human readable name of the series
:param x: the list of x-values to add
:param y: the list of y-values to add
        :raises ValueError: if the x and y lists are of different lengths
:returns bool: True if the series was created, false if data was appended
"""
if len(x) != len(y):
raise ValueError("X and Y lists must have the same number of elements")
created = False
if series_id not in self.__series.keys():
self.__series[series_id] = self.number_of_series
self.__series_names[series_id] = series_name
self.x.append([])
self.y.append([])
self.number_of_series += 1
created = True
idx = self.__series[str(series_id)]
self.x[idx] += x
self.y[idx] += y
if not self.__persistent:
self.x[idx] = self.x[idx][-self.MAX_VALUES:]
self.y[idx] = self.y[idx][-self.MAX_VALUES:]
return created
def get_name(self, series_id):
"""
Returns the name of a series in the DataContainer with the given series ID
:param series_id: the series name to return
:returns: The name of the series if it is in the Container, otherwise the series ID
"""
return self.__series_names[series_id].replace("_", " ").title() \
if series_id in self.__series_names.keys() else series_id
def all_series(self):
"""
A generator which yields the series x, y values
:returns: generated [x, y] value lists
"""
for key in self.__series.keys():
idx = self.__series[key]
yield [key, self.x[idx], self.y[idx]]
def get_latest(self, named=False):
"""
Gets the latest readings for each variable type and returns them in a pair of variable name / value pairs
        :param named: If False (default), results are keyed by series id; if True, by the human readable series name
:returns: A list of tuples. Each tuple is in the form `(variable_name, value)`
"""
result = []
for k in self.__series.keys():
val = self.y[self.__series[k]][-1]
if named:
k = self.get_name(k)
result.append((k, val))
return result
def get_x(self, series_id):
"""
Gets a list of x-values for a specified series_name
:param series_id: the string name of the series to retrieve
:returns: a list of x values if the key is found, an empty list otherwise
"""
try:
idx = self.__series[str(series_id)]
except KeyError:
return []
return self.x[idx]
def get_y(self, series_id):
"""
Gets a list of y-values for a specified series_name
:param series_id: the string name of the series to retrieve
:returns: a list of y values if the key is found, an empty list otherwise
"""
try:
idx = self.__series[str(series_id)]
except KeyError:
return []
return self.y[idx]
def get_series(self, series_id):
"""
Gets a single series and returns a list of [x,y] values
:param series_id: The name of the series to return
:returns: A list of [x,y] values for the given series, or empty lists if the series doesn't exist
"""
if series_id not in self.__series.keys():
return [[], []]
else:
idx = self.__series[series_id]
return [self.x[idx], self.y[idx]]
def get_transformed_series(self, series_id):
"""
Gets a single series and returns a list of [x,y] values from the transformed data
:param series_id: The name of the series to return
:returns: A list of [x,y] values for the given series, or empty lists if the series doesn't exist
"""
if series_id not in self.__series.keys() or not self.x_transformed:
return [[], []]
else:
idx = self.__series[series_id]
return [self.x_transformed[idx], self.y_transformed[idx]]
def get_series_index(self, series_id):
"""
Gets the index for a given series, or returns None if the series is not found
:param series_id: The name of the series to find the index for
:returns: An integer representing the 0 based index of this series name in the series dictionary
"""
try:
return self.__series[series_id]
except KeyError:
return None
def has_series(self, series_id):
"""
Checks is the given series name is registered in the DataContainer
:param series_id: The name of the series to check (will be converted to string)
:returns: True if the series exists, false otherwise
"""
return str(series_id) in self.__series.keys()
def get_series_names(self):
"""
Returns a list of series names that are registered in this DataContainer
:returns: A list of string series names registered to this DataContainer
"""
return self.__series.keys()
def add_transform(self, transform):
"""
Adds a data transform to the DataContainer
"""
if not isinstance(transform, BaseDataTransform):
raise ValueError("Attempted to add a data transformation class which doesn't derive from BaseDataTransform")
self.__transforms.append(transform)
def apply_transforms(self):
"""
Applies the transformation chain
"""
self.x_transformed = [data[:] for data in self.x]
self.y_transformed = [data[:] for data in self.y]
for transform in self.__transforms:
transform.apply(self)
def get_transforms(self):
"""
Gets all the current transforms applied
:returns: A list of BaseDataTransform classes
"""
return self.__transforms
def empty(self):
"""
Checks if a DataContainer is empty. An empty data container has no
data series. A container with data series but no data values is NOT empty
:returns: True if there are no data series, False otherwise
"""
return len(self.__series.keys()) == 0
class BaseDataTransform(object):
"""
A base class which must be inherited by DataTransform classes.
"""
def apply(self, container):
"""
Takes a DataContainer object and applies a transformation to the X and Y data in the
DataContainer. This is a base class which should be inherited from.
.. warning::
If no `apply` method is provided on the derived class then a `NotImplementedError` will be thrown
:raises: NotImplementedError
"""
raise NotImplementedError("BaseDataTransform.apply should be overridden by derived instances")
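# A minimal usage sketch (illustrative; the transform and series names are made up)
# showing how a DataContainer and a DataTransform are combined.
class ScaleTransform(BaseDataTransform):
    """Multiplies every transformed y value by a fixed factor (illustrative only)."""
    def __init__(self, factor):
        self.factor = factor
    def apply(self, container):
        container.y_transformed = [
            [y * self.factor for y in series] for series in container.y_transformed]
def example_container_usage():
    container = DataContainer()
    container.push("boiler", "boiler_temp", x=[0, 1, 2], y=[20.0, 20.5, 21.0])
    container.add_transform(ScaleTransform(1.8))
    container.apply_transforms()
    # x values are copied unchanged, y values are scaled by 1.8
    return container.get_transformed_series("boiler")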
| agpl-3.0 | -6,738,503,005,614,650,000 | 32.066667 | 120 | 0.591437 | false |
SvichkarevAnatoly/Course-Python-Bioinformatics | semester1/bioseq8/example8.py | 1 | 1082 | import rpy2.robjects as R
def binomialTailTest(count, nTrials, pEvent, oneSided=True):
alt = 'greater' if oneSided else 'two.sided'
func = R.r['binom.test']
result = func(x=count, n=nTrials, p=pEvent, alternative=alt)
return result[2][0]
count = 530
nTrials = 1000
pEvent = 0.5
result = binomialTailTest(count, nTrials, pEvent, oneSided=True)
print('Binomial one tail', result)
result = binomialTailTest(count, nTrials, pEvent, oneSided=False)
print('Binomial two tail', result)
def tTest(x, y, sameVariance=False):
func = R.r['t.test']
argDict = {'var.equal': sameVariance}
result = func(x=R.FloatVector(x), y=R.FloatVector(y), **argDict)
return result[0][0], result[2][0]
from numpy import array
samples1 = array([1.752, 1.818, 1.597, 1.697, 1.644, 1.593])
samples2 = array([1.878, 1.648, 1.819, 1.794, 1.745, 1.827])
print('Same variance result', tTest(samples1, samples2, sameVariance=True))
# Result is: -2.072, 0.0650
print('Not same variance result', tTest(samples1, samples2, sameVariance=False))
# Result is: # -2.072 0.0654
| gpl-2.0 | 5,413,816,223,328,314,000 | 25.390244 | 80 | 0.687616 | false |
cldavid/aquacc | temp_client.py | 1 | 1340 | #!/usr/bin/python
import socket
import re
import subprocess
sensors_db = {
"2857993450082": "in_temp",
"2866BC3C5006E": "out_temp"
}
ALERT_TEMP_IN_MIN = 27.5
ALERT_TEMP_IN_MAX = 29.5
print "Sensor DB"
for i in sensors_db:
print i, sensors_db[i]
print
s = socket.socket()
host = "localhost"
port = 5000
r = re.compile("^Epoch-Time:\s+(\d+)\s+Sensor:\s+(\w+),(\d+\.\d+),(\w+),(\d+\.\d+).*$")
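# Illustrative sanity check of the line format the pattern expects (values are made up):
example_line = "Epoch-Time: 1449692962 Sensor: 2857993450082,28.12,2866BC3C5006E,26.50"
assert r.match(example_line) is not None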
s.connect((host, port))
f = s.makefile()
for i in range(0, 100):
f.write("a")
f.write("\n")
f.flush()
while 1:
data = f.readline()
m = r.match(data)
if m :
epochTime = m.group(1)
sensorName1 = sensors_db[m.group(2)]
sensorValue1 = float(m.group(3))
sensorName2 = sensors_db[m.group(4)]
sensorValue2 = float(m.group(5))
sensor = { sensorName1: sensorValue1, sensorName2: sensorValue2 }
rrdString = "/usr/bin/rrdtool update /www/multirPItemp.rrd --template " + sensorName1 + ":" + sensorName2 + " -- " + str(epochTime) + ":" + str(sensorValue1) + ":" + str(sensorValue2)
print rrdString
subprocess.call(rrdString, shell=True)
if ((ALERT_TEMP_IN_MIN > sensor["in_temp"]) or (sensor["in_temp"] >= ALERT_TEMP_IN_MAX)) :
ifttt = "/usr/local/sbin/sendIFTTTmsg.sh new_temperature_event " + str(sensor["in_temp"])
print ifttt
subprocess.call(ifttt, shell=True)
s.close()
| gpl-3.0 | 3,318,766,553,013,326,300 | 24.283019 | 185 | 0.627612 | false |
DG-i/openshift-ansible | roles/lib_openshift/library/oc_serviceaccount_secret.py | 1 | 58441 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount_secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount_secret
short_description: Module to manage openshift service account secrets
description:
- Manage openshift service account secrets programmatically.
options:
state:
description:
- If present, the service account will be linked with the secret if it is not already. If absent, the service account will be unlinked from the secret if it is already linked. If list, information about the service account secrets will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
service_account:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account and secret.
required: true
default: None
aliases: []
secret:
description:
- The secret that should be linked to the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get secrets of a service account
oc_serviceaccount_secret:
state: list
service_account: builder
namespace: default
register: sasecretout
- name: Link a service account to a specific secret
oc_serviceaccount_secret:
service_account: builder
secret: mynewsecret
namespace: default
register: sasecretout
'''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it, so skip it
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned as a list
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
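        # For reference, a successful call with output=True returns a dict shaped
        # roughly like {'returncode': 0, 'results': <parsed JSON or raw stdout>,
        #               'cmd': 'oc -n default get pods -o json'}
        # where the 'cmd' string above is only an example.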
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
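    # Illustrative example (hypothetical option names): given
    #   options = {'output': {'value': 'json', 'include': True},
    #              'replicas': {'value': 3, 'include': True}}
    # stringify() would return ['--output=json', '--replicas=3'].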
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
        ''' instantiate a properly structured service account '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
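    # For illustration (the names here are hypothetical),
    #   ServiceAccountConfig('builder', 'default', '/tmp/kubeconfig', secrets=['build-secret'])
    # produces self.data roughly like:
    #   {'apiVersion': 'v1', 'kind': 'ServiceAccount',
    #    'metadata': {'name': 'builder', 'namespace': 'default'},
    #    'secrets': [{'name': 'build-secret'}], 'imagePullSecrets': []}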
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets is None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
''' property for secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
        if remove_idx is not None:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
class OCServiceAccountSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
@property
def service_account(self):
''' Property for the service account '''
if not self._service_account:
self.get()
return self._service_account
@service_account.setter
def service_account(self, data):
''' setter for the service account '''
self._service_account = data
def exists(self, in_secret):
''' verifies if secret exists in the service account '''
result = self.service_account.find_secret(in_secret)
if not result:
return False
return True
def get(self):
''' get the service account definition from the master '''
sao = self._get(OCServiceAccountSecret.kind, self.config.name)
if sao['returncode'] == 0:
self.service_account = ServiceAccount(content=sao['results'][0])
sao['results'] = self.service_account.get('secrets')
return sao
def delete(self):
''' delete secrets '''
modified = []
for rem_secret in self.config.secrets:
modified.append(self.service_account.delete_secret(rem_secret))
if any(modified):
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
def put(self):
''' place secrets into sa '''
modified = False
for add_secret in self.config.secrets:
if not self.service_account.find_secret(add_secret):
self.service_account.add_secret(add_secret)
modified = True
if modified:
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
''' run the ansible idempotent code '''
sconfig = ServiceAccountConfig(params['service_account'],
params['namespace'],
params['kubeconfig'],
[params['secret']],
None)
oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sa_sec.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
########
# Delete
########
if state == 'absent':
if oc_sa_sec.exists(params['secret']):
if check_mode:
                    return {'changed': True, 'msg': 'Would have removed the ' + \
                           'secret from the service account.'}
api_rval = oc_sa_sec.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Create
########
if not oc_sa_sec.exists(params['secret']):
if check_mode:
return {'changed': True, 'msg': 'Would have added the ' + \
'secret to the service account.'}
# Create it here
api_rval = oc_sa_sec.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa_sec.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
def main():
'''
ansible oc module to manage service account secrets.
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default=None, required=True, type='str'),
secret=dict(default=None, type='str'),
service_account=dict(required=True, type='str'),
),
supports_check_mode=True,
)
rval = OCServiceAccountSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
| apache-2.0 | -6,776,691,593,204,628,000 | 33.076385 | 301 | 0.535104 | false |
atamazian/traffic-proc-tools | gg1_function.py | 1 | 2914 | #Simulate queuing system G/G/1
#@Author: Nguyen Duc Viet
import random as rd
import numpy as np
import simpy
#Function for empirical data --------------------------------------------------------------------------------------------
data_wt = []
def arrival(env, number,counter,interval,time_service):
for i in range(number):
t = interval[i]
yield env.timeout(t)
c = service(env,'Customer %02d'%i,counter,i,time_service[i])
env.process(c)
def service(env,name, counter,i, time_service):
arrive = env.now
with counter.request() as req:
yield req
wait = env.now - arrive
#print('%7.4f %s: Waited %6.3f' % (env.now, name, wait))
data_wt.append(wait)
ts = time_service
yield env.timeout(ts)
#print('%7.4f %s: Finished' % (env.now, name))
def simulate_gg1(n,interval_time,time_service):
env = simpy.Environment()
counter = simpy.Resource(env, capacity=1)
t = env.now
env.process(arrival(env,n,counter,interval_time,time_service))
env.run()
t = env.now - t
#print("\nTotal simulation time: %f"% t)
tw = np.array(data_wt)
ts = np.array(time_service)
del data_wt[:] #reset list variable containing waiting time
b=0 #busy time of server
for i in range(n):
b = b+ts[i]
    t_in_system = tw.sum() + b  # Total time spent in system for all packets = total waiting time + total service time
#print("Total waiting time of %i packets: %f" %(n,tw.sum()))
#print("Total time spent in system of %i packets: %f\n" %(n,t_in_system))
    #Calculate output parameters: utilization, mean time spent in system, mean number of clients
u = b/t
w = t_in_system/n #Mean time spent in system
l = t_in_system/t #Mean number of clients in the system
return (u,w,l)
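# Illustrative usage of simulate_gg1 (a hypothetical workload, not part of the original module):
#   n = 1000
#   inter_arrival = np.random.exponential(1.0, n)
#   service_times = np.random.exponential(0.8, n)
#   utilization, mean_time_in_system, mean_clients = simulate_gg1(n, inter_arrival, service_times)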
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#function for simulating M/M/1
def simulate_MM1(lamb_da,mu):
u = lamb_da/mu
if u>1:
u=1
W =1/(mu-lamb_da)
Wq = W - 1/mu
L = lamb_da*W
return (u,W,L)
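# Worked check of the analytical M/M/1 formulas above (example numbers):
#   lamb_da=2, mu=5  ->  u = 0.4, W = 1/(5-2) ~ 0.333, L = 2*W ~ 0.667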
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#Function for simulating QE/QE/1
def qexp_rate(q, ave):
rate = 1/(ave*(3-2*q))
return rate
def ts_qlog(x,q):
if q==1:
y=np.log(x)
else:
y = (x**(1-q)-1)/(1-q)
return y
def rand_qexp(N,q,rate):
q1 = 1/(2-q)
u = np.random.uniform(0,1,size=(1,N))
y = -q1*ts_qlog(u,q1)/rate
return y
| mit | 5,512,354,887,458,386,000 | 32.494253 | 128 | 0.46431 | false |
myarjunar/QGIS | python/plugins/processing/algs/grass7/Grass7Utils.py | 1 | 18151 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GrassUtils.py
---------------------
Date : February 2015
Copyright : (C) 2014-2015 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'February 2015'
__copyright__ = '(C) 2014-2015, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import stat
import shutil
import subprocess
import os
from qgis.core import QgsApplication
from qgis.PyQt.QtCore import QCoreApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import userFolder, isWindows, isMac, tempFolder, mkdir
from processing.tests.TestData import points
class Grass7Utils(object):
GRASS_REGION_XMIN = 'GRASS7_REGION_XMIN'
GRASS_REGION_YMIN = 'GRASS7_REGION_YMIN'
GRASS_REGION_XMAX = 'GRASS7_REGION_XMAX'
GRASS_REGION_YMAX = 'GRASS7_REGION_YMAX'
GRASS_REGION_CELLSIZE = 'GRASS7_REGION_CELLSIZE'
GRASS_FOLDER = 'GRASS7_FOLDER'
GRASS_LOG_COMMANDS = 'GRASS7_LOG_COMMANDS'
GRASS_LOG_CONSOLE = 'GRASS7_LOG_CONSOLE'
GRASS_HELP_PATH = 'GRASS_HELP_PATH'
sessionRunning = False
sessionLayers = {}
projectionSet = False
isGrass7Installed = False
version = None
@staticmethod
def grassBatchJobFilename():
'''This is used in Linux. This is the batch job that we assign to
GRASS_BATCH_JOB and then call GRASS and let it do the work
'''
filename = 'grass7_batch_job.sh'
batchfile = os.path.join(userFolder(), filename)
return batchfile
@staticmethod
def grassScriptFilename():
'''This is used in windows. We create a script that initializes
GRASS and then uses grass commands
'''
filename = 'grass7_script.bat'
filename = os.path.join(userFolder(), filename)
return filename
@staticmethod
def installedVersion(run=False):
if Grass7Utils.isGrass7Installed and not run:
return Grass7Utils.version
if Grass7Utils.grassPath() is None:
return None
for command in ["grass73", "grass72", "grass71", "grass70", "grass"]:
with subprocess.Popen(
["{} -v".format(command)],
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
) as proc:
try:
lines = proc.stdout.readlines()
for line in lines:
if "GRASS GIS " in line:
line = line.split(" ")[-1].strip()
if line.startswith("7."):
Grass7Utils.version = line
Grass7Utils.command = command
return Grass7Utils.version
except:
pass
return None
@staticmethod
def grassPath():
if not isWindows() and not isMac():
return ''
folder = ProcessingConfig.getSetting(Grass7Utils.GRASS_FOLDER) or ''
if not os.path.exists(folder):
folder = None
if folder is None:
if isWindows():
if "OSGEO4W_ROOT" in os.environ:
testfolder = os.path.join(str(os.environ['OSGEO4W_ROOT']), "apps")
else:
testfolder = str(QgsApplication.prefixPath())
testfolder = os.path.join(testfolder, 'grass')
if os.path.isdir(testfolder):
for subfolder in os.listdir(testfolder):
if subfolder.startswith('grass-7'):
folder = os.path.join(testfolder, subfolder)
break
else:
folder = os.path.join(str(QgsApplication.prefixPath()), 'grass7')
if not os.path.isdir(folder):
folder = '/Applications/GRASS-7.0.app/Contents/MacOS'
return folder or ''
@staticmethod
def grassDescriptionPath():
return os.path.join(os.path.dirname(__file__), 'description')
@staticmethod
def createGrass7Script(commands):
folder = Grass7Utils.grassPath()
script = Grass7Utils.grassScriptFilename()
gisrc = os.path.join(userFolder(), 'processing.gisrc7') # FIXME: use temporary file
# Temporary gisrc file
with open(gisrc, 'w') as output:
location = 'temp_location'
gisdbase = Grass7Utils.grassDataFolder()
output.write('GISDBASE: ' + gisdbase + '\n')
output.write('LOCATION_NAME: ' + location + '\n')
output.write('MAPSET: PERMANENT \n')
output.write('GRASS_GUI: text\n')
with open(script, 'w') as output:
output.write('set HOME=' + os.path.expanduser('~') + '\n')
output.write('set GISRC=' + gisrc + '\n')
output.write('set WINGISBASE=' + folder + '\n')
output.write('set GISBASE=' + folder + '\n')
output.write('set GRASS_PROJSHARE=' + os.path.join(folder, 'share', 'proj') + '\n')
output.write('set GRASS_MESSAGE_FORMAT=plain\n')
# Replacement code for etc/Init.bat
output.write('if "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%PATH%\n')
output.write('if not "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%GRASS_ADDON_PATH%;%PATH%\n')
output.write('\n')
output.write('set GRASS_VERSION=' + Grass7Utils.installedVersion() + '\n')
output.write('if not "%LANG%"=="" goto langset\n')
output.write('FOR /F "usebackq delims==" %%i IN (`"%WINGISBASE%\\etc\\winlocale"`) DO @set LANG=%%i\n')
output.write(':langset\n')
output.write('\n')
output.write('set PATHEXT=%PATHEXT%;.PY\n')
            output.write('set PYTHONPATH=%PYTHONPATH%;%WINGISBASE%\\etc\\python;%WINGISBASE%\\etc\\wxpython\n')
output.write('\n')
output.write('g.gisenv.exe set="MAPSET=PERMANENT"\n')
output.write('g.gisenv.exe set="LOCATION=' + location + '"\n')
output.write('g.gisenv.exe set="LOCATION_NAME=' + location + '"\n')
output.write('g.gisenv.exe set="GISDBASE=' + gisdbase + '"\n')
output.write('g.gisenv.exe set="GRASS_GUI=text"\n')
for command in commands:
Grass7Utils.writeCommand(output, command)
output.write('\n')
output.write('exit\n')
@staticmethod
def createGrass7BatchJobFileFromGrass7Commands(commands):
with open(Grass7Utils.grassBatchJobFilename(), 'w') as fout:
for command in commands:
Grass7Utils.writeCommand(fout, command)
fout.write('exit')
@staticmethod
def grassMapsetFolder():
folder = os.path.join(Grass7Utils.grassDataFolder(), 'temp_location')
mkdir(folder)
return folder
@staticmethod
def grassDataFolder():
tempfolder = os.path.join(tempFolder(), 'grassdata')
mkdir(tempfolder)
return tempfolder
@staticmethod
def createTempMapset():
'''Creates a temporary location and mapset(s) for GRASS data
processing. A minimal set of folders and files is created in the
system's default temporary directory. The settings files are
written with sane defaults, so GRASS can do its work. The mapset
projection will be set later, based on the projection of the first
input image or vector
'''
folder = Grass7Utils.grassMapsetFolder()
mkdir(os.path.join(folder, 'PERMANENT'))
mkdir(os.path.join(folder, 'PERMANENT', '.tmp'))
Grass7Utils.writeGrass7Window(os.path.join(folder, 'PERMANENT', 'DEFAULT_WIND'))
with open(os.path.join(folder, 'PERMANENT', 'MYNAME'), 'w') as outfile:
outfile.write(
'QGIS GRASS GIS 7 interface: temporary data processing location.\n')
Grass7Utils.writeGrass7Window(os.path.join(folder, 'PERMANENT', 'WIND'))
mkdir(os.path.join(folder, 'PERMANENT', 'sqlite'))
with open(os.path.join(folder, 'PERMANENT', 'VAR'), 'w') as outfile:
outfile.write('DB_DRIVER: sqlite\n')
outfile.write('DB_DATABASE: $GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db\n')
@staticmethod
def writeGrass7Window(filename):
with open(filename, 'w') as out:
out.write('proj: 0\n')
out.write('zone: 0\n')
out.write('north: 1\n')
out.write('south: 0\n')
out.write('east: 1\n')
out.write('west: 0\n')
out.write('cols: 1\n')
out.write('rows: 1\n')
out.write('e-w resol: 1\n')
out.write('n-s resol: 1\n')
out.write('top: 1\n')
out.write('bottom: 0\n')
out.write('cols3: 1\n')
out.write('rows3: 1\n')
out.write('depths: 1\n')
out.write('e-w resol3: 1\n')
out.write('n-s resol3: 1\n')
out.write('t-b resol: 1\n')
@staticmethod
def prepareGrass7Execution(commands):
env = os.environ.copy()
if isWindows():
Grass7Utils.createGrass7Script(commands)
command = ['cmd.exe', '/C ', Grass7Utils.grassScriptFilename()]
else:
gisrc = os.path.join(userFolder(), 'processing.gisrc7')
env['GISRC'] = gisrc
env['GRASS_MESSAGE_FORMAT'] = 'plain'
env['GRASS_BATCH_JOB'] = Grass7Utils.grassBatchJobFilename()
if 'GISBASE' in env:
del env['GISBASE']
Grass7Utils.createGrass7BatchJobFileFromGrass7Commands(commands)
os.chmod(Grass7Utils.grassBatchJobFilename(), stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
if isMac() and os.path.exists(os.path.join(Grass7Utils.grassPath(), 'grass.sh')):
command = os.path.join(Grass7Utils.grassPath(), 'grass.sh') + ' ' \
+ os.path.join(Grass7Utils.grassMapsetFolder(), 'PERMANENT')
else:
command = Grass7Utils.command + ' ' + os.path.join(Grass7Utils.grassMapsetFolder(), 'PERMANENT')
return command, env
@staticmethod
def executeGrass7(commands, feedback, outputCommands=None):
loglines = []
loglines.append(Grass7Utils.tr('GRASS GIS 7 execution console output'))
grassOutDone = False
command, grassenv = Grass7Utils.prepareGrass7Execution(commands)
with subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
env=grassenv
) as proc:
for line in iter(proc.stdout.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
feedback.setProgress(int(line[len('GRASS_INFO_PERCENT') + 2:]))
except:
pass
else:
if 'r.out' in line or 'v.out' in line:
grassOutDone = True
loglines.append(line)
feedback.pushConsoleInfo(line)
# Some GRASS scripts, like r.mapcalculator or r.fillnulls, call
# other GRASS scripts during execution. This may override any
# commands that are still to be executed by the subprocess, which
# are usually the output ones. If that is the case runs the output
# commands again.
if not grassOutDone and outputCommands:
command, grassenv = Grass7Utils.prepareGrass7Execution(outputCommands)
with subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
env=grassenv
) as proc:
for line in iter(proc.stdout.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
feedback.setProgress(int(
line[len('GRASS_INFO_PERCENT') + 2:]))
except:
pass
else:
loglines.append(line)
feedback.pushConsoleInfo(line)
if ProcessingConfig.getSetting(Grass7Utils.GRASS_LOG_CONSOLE):
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
# GRASS session is used to hold the layers already exported or
# produced in GRASS between multiple calls to GRASS algorithms.
# This way they don't have to be loaded multiple times and
# following algorithms can use the results of the previous ones.
# Starting a session just involves creating the temp mapset
# structure
@staticmethod
def startGrass7Session():
if not Grass7Utils.sessionRunning:
Grass7Utils.createTempMapset()
Grass7Utils.sessionRunning = True
# End session by removing the temporary GRASS mapset and all
# the layers.
@staticmethod
def endGrass7Session():
shutil.rmtree(Grass7Utils.grassMapsetFolder(), True)
Grass7Utils.sessionRunning = False
Grass7Utils.sessionLayers = {}
Grass7Utils.projectionSet = False
@staticmethod
def getSessionLayers():
return Grass7Utils.sessionLayers
@staticmethod
def addSessionLayers(exportedLayers):
Grass7Utils.sessionLayers = dict(
list(Grass7Utils.sessionLayers.items()) +
list(exportedLayers.items()))
@staticmethod
def checkGrass7IsInstalled(ignorePreviousState=False):
if isWindows():
path = Grass7Utils.grassPath()
if path == '':
return Grass7Utils.tr(
'GRASS GIS 7 folder is not configured. Please configure '
'it before running GRASS GIS 7 algorithms.')
cmdpath = os.path.join(path, 'bin', 'r.out.gdal.exe')
if not os.path.exists(cmdpath):
return Grass7Utils.tr(
'The specified GRASS 7 folder "{}" does not contain '
'a valid set of GRASS 7 modules.\nPlease, go to the '
'Processing settings dialog, and check that the '
'GRASS 7\nfolder is correctly configured'.format(os.path.join(path, 'bin')))
if not ignorePreviousState:
if Grass7Utils.isGrass7Installed:
return
try:
from processing import runalg
result = runalg(
'grass7:v.voronoi',
points(),
False,
False,
None,
-1,
0.0001,
0,
None,
)
if not os.path.exists(result['output']):
return Grass7Utils.tr(
'It seems that GRASS GIS 7 is not correctly installed and '
'configured in your system.\nPlease install it before '
'running GRASS GIS 7 algorithms.')
except:
return Grass7Utils.tr(
'Error while checking GRASS GIS 7 installation. GRASS GIS 7 '
'might not be correctly configured.\n')
Grass7Utils.isGrass7Installed = True
@staticmethod
def tr(string, context=''):
if context == '':
context = 'Grass7Utils'
return QCoreApplication.translate(context, string)
@staticmethod
def writeCommand(output, command):
try:
# Python 2
output.write(command.encode('utf8') + '\n')
except TypeError:
# Python 3
output.write(command + '\n')
@staticmethod
def grassHelpPath():
helpPath = ProcessingConfig.getSetting(Grass7Utils.GRASS_HELP_PATH)
if helpPath is None:
if isWindows():
localPath = os.path.join(Grass7Utils.grassPath(), 'docs/html')
if os.path.exists(localPath):
helpPath = os.path.abspath(localPath)
elif isMac():
localPath = '/Applications/GRASS-7.0.app/Contents/MacOS/docs/html'
if os.path.exists(localPath):
helpPath = os.path.abspath(localPath)
else:
searchPaths = ['/usr/share/doc/grass-doc/html',
'/opt/grass/docs/html',
'/usr/share/doc/grass/docs/html']
for path in searchPaths:
if os.path.exists(path):
helpPath = os.path.abspath(path)
break
return helpPath if helpPath is not None else 'http://grass.osgeo.org/{}/manuals/'.format(Grass7Utils.command)
| gpl-2.0 | -5,151,247,994,440,638,000 | 39.515625 | 132 | 0.552311 | false |
yuma-m/pychord | pychord/progression.py | 1 | 3153 | # -*- coding: utf-8 -*-
from .chord import as_chord, Chord
class ChordProgression(object):
""" Class to handle chord progressions.
:param list[pychord.Chord] _chords: component chords of chord progression.
"""
def __init__(self, initial_chords=None):
""" Constructor of ChordProgression instance.
:type initial_chords: str|pychord.Chord|list
:param initial_chords: Initial chord or chords of the chord progressions
"""
if initial_chords is None:
initial_chords = []
if isinstance(initial_chords, Chord):
self._chords = [initial_chords]
elif isinstance(initial_chords, str):
self._chords = [as_chord(initial_chords)]
elif isinstance(initial_chords, list):
self._chords = [as_chord(chord) for chord in initial_chords]
else:
raise TypeError("Cannot initialize ChordProgression with argument of {} type".format(type(initial_chords)))
def __unicode__(self):
return " | ".join([chord.chord for chord in self._chords])
def __str__(self):
return " | ".join([chord.chord for chord in self._chords])
def __repr__(self):
return "<ChordProgression: {}>".format(" | ".join([chord.chord for chord in self._chords]))
def __add__(self, other):
self._chords += other.chords
return self
def __len__(self):
return len(self._chords)
def __getitem__(self, item):
return self._chords[item]
def __setitem__(self, key, value):
self._chords[key] = value
def __eq__(self, other):
if not isinstance(other, ChordProgression):
raise TypeError("Cannot compare ChordProgression object with {} object".format(type(other)))
if len(self) != len(other):
return False
for c, o in zip(self, other):
if c != o:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chords(self):
""" Get component chords of chord progression
:rtype: list[pychord.Chord]
"""
return self._chords
def append(self, chord):
""" Append a chord to chord progressions
:type chord: str|pychord.Chord
:param chord: A chord to append
:return:
"""
self._chords.append(as_chord(chord))
def insert(self, index, chord):
""" Insert a chord to chord progressions
:param int index: Index to insert a chord
:type chord: str|pychord.Chord
:param chord: A chord to insert
:return:
"""
self._chords.insert(index, as_chord(chord))
def pop(self, index=-1):
""" Pop a chord from chord progressions
:param int index: Index of the chord to pop (default: -1)
:return: pychord.Chord
"""
return self._chords.pop(index)
def transpose(self, trans):
""" Transpose whole chord progressions
:param int trans: Transpose key
:return:
"""
for chord in self._chords:
chord.transpose(trans)
| mit | 278,924,370,740,419,650 | 28.745283 | 119 | 0.582937 | false |
Kobzol/debug-visualizer | debugger/pycgdb/programinfo.py | 1 | 2577 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import os
class ProgramInfo(object):
def __init__(self, elffile):
"""
@type elffile: elftools.elf.elffile.ELFFile
"""
dwarf_info = elffile.get_dwarf_info()
self.files = {}
self.addresses = {}
for cu in dwarf_info.iter_CUs():
line_program = dwarf_info.line_program_for_CU(cu)
if line_program:
for line_entry in line_program.get_entries():
if line_entry.state:
self._parse_line_state(line_program.header.file_entry,
line_entry.state)
for die in cu.iter_DIEs():
self._parse_die(die)
def has_file(self, file):
return os.path.abspath(file) in self.files
def has_location(self, file, line):
file = os.path.abspath(file)
return self.has_file(file) and line in self.files[file]
def get_address(self, file, line):
file = os.path.abspath(file)
if not self.has_location(file, line):
return None
else:
return self.files[file][line][0]
def get_location(self, address):
if address in self.addresses:
return self.addresses[address]
else:
return None
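    # Illustrative usage (paths and line numbers are hypothetical):
    #   from elftools.elf.elffile import ELFFile
    #   with open('a.out', 'rb') as f:
    #       info = ProgramInfo(ELFFile(f))
    #   addr = info.get_address('/src/main.c', 42)
    #   location = info.get_location(addr) if addr is not None else None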
def _parse_die(self, die):
for child in die.iter_children():
self._parse_die(child)
def _parse_line_state(self, files, line_state):
file = os.path.abspath(files[line_state.file - 1].name)
line = line_state.line
address = line_state.address
if file not in self.files:
self.files[file] = {}
if line not in self.files[file]:
self.files[file][line] = []
self.files[file][line].append(address)
self.addresses[address] = (file, line)
| gpl-3.0 | 6,166,318,167,629,636,000 | 29.317647 | 78 | 0.589445 | false |
MicrosoftGenomics/FaST-LMM | fastlmm/util/runner/examples.py | 1 | 2867 | import math
from fastlmm.util.mapreduce import map_reduce
from fastlmm.util.runner import Local, LocalMultiProc, HPC, LocalMultiThread
import os
def is_prime(n):
assert n == int(n) and n>1, "Expect integers greater than 1"
for j in xrange(2,int(math.sqrt(n))+1):
if n % j == 0:
return False
return True
# Iterative algorithm for finding prime numbers in a range
def prime_search0(start,stop):
assert start < stop, "start must be less than stop"
prime_list = []
for i in xrange(start,stop):
if is_prime(i):
prime_list.append(i)
return prime_list
# The similar map_reduce algorithm for finding prime numbers in a range
def prime_search1(start,stop,runner):
def mapper(i):
if is_prime(i):
#assert i != 5, "I just don't like fives"
return i
else:
return None
def reducer(sequence):
result = []
for i in sequence:
if i is not None:
result.append(i)
return result
return map_reduce(xrange(start,stop),
mapper=mapper,
reducer=reducer, #lambda sequence: [i for i in sequence if i is not None], #Filter out the None's
runner=runner)
if __name__ == '__main__':
#Run the iterative algorithm
#print prime_search0(2,10) #=> [2, 3, 5, 7]
#Run the map_reduce algorithm locally.
#print prime_search1(2,10,runner=Local()) #=> [2, 3, 5, 7]
#Now we run map_reduce on 20 processors.
#from PrimeNumbers.examples import prime_search1 #If not running local, must import your function. (Recall that for import to work, you also need an empty __init__.py).
#print prime_search1(2,10,runner=LocalMultiProc(20)) #=> [2, 3, 5, 7]
#Finally we run on HPC
#------- To run on HPC must create an hpc cluster object
#remote_python_parent=r"\\GCR\Scratch\B99\escience\{0}\ppv0".format(os.environ['USERNAME']) #where to copy your "python_path" code to.
#hpc_runner= HPC(10, 'GCR',r"\\GCR\Scratch\B99\escience",
# remote_python_parent=remote_python_parent,
# unit='node', #core, socket, node
# update_remote_python_parent=True,
# template="Preemptable",
# priority="Lowest",
# nodegroups="Preemptable",
# runtime="0:11:0", # day:hour:min
# )
#runner=LocalMultiProc(2,just_one_process=False)
#runner = Local()
runner = LocalMultiThread(2,just_one_process=False)
print prime_search1(2,10,runner=runner) #=> [2, 3, 5, 7]
print "done" | apache-2.0 | -1,530,155,641,020,250,400 | 39.394366 | 172 | 0.55354 | false |
electronic-library/electronic-library-core | tests/library_test.py | 1 | 5083 | """
Unit tests for class Library.
"""
from library.bookmark import Bookmark
from library.book import Book
from library.library import Library
lib = Library()
book1_refs = {
'Authors': ['OS guy', 'Some guy'],
'Publisher': 'Forgot publisher',
'Edition': 9,
'Chapters': 18,
'Pages': 900
}
book2_refs = {
'Authors': 'Me',
'Edition': 5,
'Chapters': 7,
'Pages': 900
}
book3_refs = {
'Authors': ['Scott Chacon', 'Ben Straub'],
'Publisher': 'Apress',
'Edition': 2,
'Chapters': 10
}
book1 = Book(title='Operating System Concepts',
refs=book1_refs)
book2 = Book(title='Computer Organization and Design',
refs=book2_refs)
book3 = Book(title='Pro Git',
refs=book3_refs)
bookmark1_refs = {
'Book Title': 'Operating System Concepts',
'Edition': 9,
'Chapter': 10,
'Page': 485
}
bookmark2_refs = {
'Book Title': 'Operating System Concepts',
'Edition': 9,
'Chapter': '11.3.1',
'Page': 517
}
bookmark3_refs = {
'Book Title': 'Pro Git',
'Edition': 2,
'Chapter': 3,
'Page': 81
}
bookmark1 = Bookmark(title='File Systems',
category='Operating Systems',
refs=bookmark1_refs)
bookmark2 = Bookmark(title='Storage Structure',
category='Operating Systems',
refs=bookmark2_refs)
bookmark3 = Bookmark(title='Git Branching',
category='Git',
refs=bookmark3_refs)
lib.add_book(book1)
lib.add_book(book2)
lib.add_book(book3)
book1.add_bookmark(bookmark1)
book1.add_bookmark(bookmark2)
book3.add_bookmark(bookmark3)
class TestLibrary:
def test_book(self):
# test with ID
assert(lib.book(book1.id()) == book1)
assert(lib.book(book1.id()) != book2)
assert(lib.book(book1.id()) != book3)
assert(lib.book(book2.id()) != book1)
assert(lib.book(book2.id()) == book2)
assert(lib.book(book2.id()) != book3)
assert(lib.book(book3.id()) != book1)
assert(lib.book(book3.id()) != book2)
assert(lib.book(book3.id()) == book3)
# test with obj
assert(lib.book(book1) == book1)
assert(lib.book(book1) != book2)
assert(lib.book(book1) != book3)
assert(lib.book(book2) != book1)
assert(lib.book(book2) == book2)
assert(lib.book(book2) != book3)
assert(lib.book(book3) != book1)
assert(lib.book(book3) != book2)
assert(lib.book(book3) == book3)
# test with dict
assert(lib.book(book1_refs) == book1)
assert(lib.book(book1_refs) != book2)
assert(lib.book(book1_refs) != book3)
assert(lib.book(book2_refs) != book1)
assert(lib.book(book2_refs) == book2)
assert(lib.book(book2_refs) != book3)
assert(lib.book(book3_refs) != book1)
assert(lib.book(book3_refs) != book2)
assert(lib.book(book3_refs) == book3)
# test with str for title
assert(lib.book(book1.title()) == book1)
assert(lib.book(book1.title()) != book2)
assert(lib.book(book1.title()) != book3)
assert(lib.book(book2.title()) != book1)
assert(lib.book(book2.title()) == book2)
assert(lib.book(book2.title()) != book3)
assert(lib.book(book3.title()) != book1)
assert(lib.book(book3.title()) != book2)
assert(lib.book(book3.title()) == book3)
def test_book_with_bookmark(self):
assert(lib.book_with_bookmark(bookmark1) == book1)
assert(lib.book_with_bookmark(bookmark1) != book2)
assert(lib.book_with_bookmark(bookmark1) != book3)
assert(lib.book_with_bookmark(bookmark2) == book1)
assert(lib.book_with_bookmark(bookmark2) != book2)
assert(lib.book_with_bookmark(bookmark2) != book3)
assert(lib.book_with_bookmark(bookmark3) != book1)
assert(lib.book_with_bookmark(bookmark3) != book2)
assert(lib.book_with_bookmark(bookmark3) == book3)
def test_bookmarks_of_book(self):
assert(lib.bookmarks_of_book(book1) == {bookmark1, bookmark2})
assert(lib.bookmarks_of_book(book1) == {bookmark2, bookmark1})
assert(lib.bookmarks_of_book(book1) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book1) != {bookmark2, bookmark3})
assert(lib.bookmarks_of_book(book2) == set())
assert(lib.bookmarks_of_book(book2) != {bookmark1, bookmark2})
assert(lib.bookmarks_of_book(book2) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book2) != {bookmark2, bookmark3})
assert(lib.bookmarks_of_book(book3) == {bookmark3})
assert(lib.bookmarks_of_book(book3) != {bookmark1})
assert(lib.bookmarks_of_book(book3) != {bookmark2})
assert(lib.bookmarks_of_book(book3) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book3) != {bookmark2, bookmark3})
def test_add_category(self):
pass
def test_rm_category(self):
pass
def test_parent_category_of(self):
pass
| gpl-3.0 | 215,717,537,777,519,400 | 31.793548 | 70 | 0.598465 | false |
jeamland/wsproto | test/test_upgrade.py | 1 | 5508 | # -*- coding: utf-8 -*-
"""
Test the HTTP upgrade phase of connection
"""
import base64
import email
import random
import sys
from wsproto.connection import WSConnection, CLIENT, SERVER
from wsproto.events import (
ConnectionEstablished, ConnectionFailed, ConnectionRequested
)
IS_PYTHON3 = sys.version_info >= (3, 0)
def parse_headers(headers):
if IS_PYTHON3:
headers = email.message_from_bytes(headers)
else:
headers = email.message_from_string(headers)
return dict(headers.items())
class TestClientUpgrade(object):
def initiate(self, host, path, **kwargs):
ws = WSConnection(CLIENT, host, path, **kwargs)
data = ws.bytes_to_send()
request, headers = data.split(b'\r\n', 1)
method, path, version = request.strip().split()
headers = parse_headers(headers)
print(method, path, version)
print(repr(headers))
return ws, method, path, version, headers
def test_initiate_connection(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(
_host, _path, subprotocols=["foo", "bar"])
assert method == b'GET'
assert path == _path.encode('ascii')
assert headers['host'] == _host
assert headers['connection'].lower() == 'upgrade'
assert headers['upgrade'].lower() == 'websocket'
assert 'sec-websocket-key' in headers
assert 'sec-websocket-version' in headers
assert headers['sec-websocket-protocol'] == 'foo, bar'
def test_no_subprotocols(self):
ws, method, path, version, headers = self.initiate("foo", "/bar")
assert 'sec-websocket-protocol' not in headers
def test_correct_accept_token(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionEstablished)
def test_incorrect_accept_token(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
        key = b'This is the wrong token'
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
def test_bad_connection_header(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Updraft\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
def test_bad_upgrade_header(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: SebWocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
class TestServerUpgrade(object):
def test_correct_request(self):
test_host = 'frob.nitz'
test_path = '/fnord'
ws = WSConnection(SERVER)
nonce = bytes(random.getrandbits(8) for x in range(0, 16))
nonce = base64.b64encode(nonce)
request = b"GET " + test_path.encode('ascii') + b" HTTP/1.1\r\n"
request += b'Host: ' + test_host.encode('ascii') + b'\r\n'
request += b'Connection: Upgrade\r\n'
request += b'Upgrade: WebSocket\r\n'
request += b'Sec-WebSocket-Version: 13\r\n'
request += b'Sec-WebSocket-Key: ' + nonce + b'\r\n'
request += b'\r\n'
ws.receive_bytes(request)
event = next(ws.events())
assert isinstance(event, ConnectionRequested)
ws.accept(event)
data = ws.bytes_to_send()
response, headers = data.split(b'\r\n', 1)
version, code, reason = response.split(b' ')
headers = parse_headers(headers)
accept_token = ws._generate_accept_token(nonce)
assert int(code) == 101
assert headers['connection'].lower() == 'upgrade'
assert headers['upgrade'].lower() == 'websocket'
assert headers['sec-websocket-accept'] == accept_token.decode('ascii')
| mit | -7,825,416,513,577,806,000 | 31.4 | 78 | 0.604757 | false |
ansobolev/regCMPostProc | src/plot.py | 1 | 2816 | #!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from value import Value
class Plotter(object):
def __init__(self, value):
self._value = value
self.lat, self.lon = value.latlon
def plot(self, coastlines=True,
countries=True,
places=True,
title=None,
levels = None):
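        # when explicit (min, max) bounds are given, build ~10 evenly spaced contour levels from them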
if levels is not None:
l_min, l_max = levels
l = (l_max - l_min) / 10
levels = range(l_min, l_max + l, l)
projection = ccrs.PlateCarree()
self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
if coastlines:
self.ax.coastlines('10m')
if countries:
countries = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='admin_0_countries')
self.ax.add_feature(countries, color='r', alpha=0.1)
if places:
places = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='populated_places')
self.ax.add_feature(places, color='b', hatch='o')
        cx = self.ax.contourf(self.lon, self.lat, self._value.data, transform=ccrs.PlateCarree(), cmap='bwr', levels=levels)
# To mask out OCEAN or LAND
#ax.add_feature(cfeature.OCEAN)
#ax.add_feature(cfeature.LAND)
self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='blue', alpha=0.5, linestyle='-')
self.fig.colorbar(cx)
times = self._value.limits['time']
plt.title(self._value.title + ' [' + self._value.units + ']\n' +
'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')
def show(self):
plt.show()
def save(self, filename, format):
plt.savefig(filename + '.' + format)
def close(self):
plt.close(self.fig)
if __name__ == "__main__":
pass | gpl-3.0 | -1,203,394,349,944,687,000 | 34.2125 | 123 | 0.604759 | false |
Bismarrck/tensorflow | tensorflow/python/saved_model/function_serialization.py | 1 | 4869 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for serializing PolymorphicFunctions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import saved_object_graph_pb2
def _serialize_polymorphic_function(polymorphic_function, node_ids):
"""Build a SavedPolymorphicProto."""
coder = nested_structure_coder.StructureCoder()
proto = saved_object_graph_pb2.SavedPolymorphicFunction()
proto.function_spec_tuple.CopyFrom(
coder.encode_structure(polymorphic_function.function_spec.as_tuple())) # pylint: disable=protected-access
for signature, concrete_function in list_all_concrete_functions(
polymorphic_function):
bound_inputs = []
try:
for capture in concrete_function.captured_inputs:
bound_inputs.append(node_ids[capture])
except KeyError:
      # TODO(andresp): Would it be better to throw an exception?
logging.warning(
"Concrete function %s not added to object based saved model as it "
"captures tensor %s which is unsupported or not reachable from root.",
concrete_function.name, capture)
continue
function_proto = proto.monomorphic_function.add()
function_proto.concrete_function = concrete_function.name
function_proto.canonicalized_input.CopyFrom(
coder.encode_structure(signature))
function_proto.bound_inputs.extend(bound_inputs)
return proto
def list_all_concrete_functions(polymorphic_function):
"""Given a polymorphic function, returns all of its concrete functions.
Args:
polymorphic_function: Instance of `PolymorphicFunction`.
Returns:
A list of tuples in the form (signature, concrete_function), where concrete
function is an instance of `Function`.
"""
input_signature = polymorphic_function._input_signature # pylint: disable=protected-access
if input_signature is not None:
polymorphic_function.get_concrete_function()
concrete_functions = []
for signature in polymorphic_function._cached_input_signatures: # pylint: disable=protected-access
if any(isinstance(arg, defun_lib.UnknownArgument) for arg in signature):
continue
concrete_function = polymorphic_function.get_concrete_function(*signature)
concrete_functions.append((signature, concrete_function))
return concrete_functions
def list_all_polymorphic_functions(checkpointable_object):
"""Given a checkpointable object, returns all of its polymorphic functions."""
polymorphic_functions = dict()
for attribute_name in dir(checkpointable_object):
try:
attribute_value = getattr(checkpointable_object, attribute_name, None)
except: # pylint: disable=bare-except
# We really don't want to throw an exception just because some object's
# attribute accessor is broken.
attribute_value = None
# TODO(allenl): Consider de-duplicating functions which are referenced
# from multiple attributes.
if isinstance(attribute_value, def_function.PolymorphicFunction):
polymorphic_functions[attribute_name] = attribute_value
return polymorphic_functions
def add_polymorphic_functions_to_object_graph_proto(checkpointable_objects,
saved_object_graph,
node_ids):
"""Finds PolymorphicFunctions attached to objects and saves them."""
existing_objects = list(zip(checkpointable_objects, saved_object_graph.nodes))
for obj, obj_proto in existing_objects:
for name, polymorphic_function in list_all_polymorphic_functions(
obj).items():
function_node_id = len(saved_object_graph.nodes)
function_node = saved_object_graph.nodes.add()
function_node.function.CopyFrom(
_serialize_polymorphic_function(polymorphic_function, node_ids))
reference = obj_proto.children.add()
reference.node_id = function_node_id
reference.local_name = name
| apache-2.0 | 3,175,076,075,852,673,500 | 43.669725 | 112 | 0.720476 | false |
robotican/ric | ric_board/scripts/RiCConfigurator/BAL/Devices/PPMReader.py | 1 | 2203 | __author__ = 'tom1231'
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import *
from BAL.Interface.DeviceFrame import DeviceFrame, EX_DEV, PPMReader
from lxml.etree import Element, SubElement, XML
class PPMReader(DeviceFrame):
def __init__(self, frame, data):
DeviceFrame.__init__(self, EX_DEV, frame, data)
self._diffTopic = '/diff'
self._ppmTopic = '/RiC_PPM'
def fromDict(self, data):
self._diffTopic = data['diff']
self._ppmTopic = data['ppm']
def toDict(self):
data = dict()
data['type'] = PPMReader
data['diff'] = self._diffTopic
data['ppm'] = self._ppmTopic
return data
def add(self):
if not self.nameIsValid():
error = QErrorMessage()
error.setWindowTitle("Same name error")
error.showMessage("Name already taken.")
error.exec_()
self._isValid = False
return
self._diffTopic = str(self.diffTopic.text())
self._ppmTopic = str(self.ppmTopic.text())
self._isValid = True
def showDetails(self, items=None):
self.diffTopic = QLineEdit(self._diffTopic)
self.ppmTopic = QLineEdit(self._ppmTopic)
self._frame.layout().addRow(QLabel('Differential drive topic: '), self.diffTopic)
self._frame.layout().addRow(QLabel('PPM topic: '), self.ppmTopic)
def printDetails(self):
self._frame.layout().addRow(QLabel('Differential drive topic: '), QLabel(self._diffTopic))
self._frame.layout().addRow(QLabel('PPM topic: '), QLabel(self._ppmTopic))
def saveToFile(self, parent):
keysAtt = parent.keys()
ns = ''
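        # prepend the parent launch element's namespace, if one is set, to the topic names below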
if len(keysAtt) > 0 and keysAtt[0] == 'ns':
ns = '/' + parent.get('ns')
element = SubElement(parent, 'include', {
'file': '$(find ric_board)/scripts/RiCPPMReader.launch'
})
SubElement(element, 'arg', {
'name': 'ppmTopic',
'value': ns + self._ppmTopic
})
SubElement(element, 'arg', {
'name': 'diffTopic',
'value': ns + self._diffTopic
})
def getName(self):
return 'ppm_reader'
| bsd-3-clause | 7,213,415,209,181,855,000 | 30.028169 | 98 | 0.574671 | false |
bokeh/bokeh | examples/integration/glyphs/categorical_multi_glyphs.py | 1 | 1210 | from bokeh.io import show
from bokeh.layouts import gridplot
from bokeh.plotting import figure
x_range = ['a', 'b', 'c', 'd']
y_values = [1., 2., 3., 4.]
y_errors = [.1, .2, .3, .4]
err_xs = []
err_ys = []
for x, y, yerr in zip(x_range, y_values, y_errors):
err_xs.append((x, x))
err_ys.append((y - yerr, y + yerr))
p1 = figure(x_range=x_range, title="multi_line", width=300, height=300)
p1.square(x_range, y_values, size=7, line_alpha=0)
p1.multi_line(err_xs, err_ys)
p2 = figure(x_range=x_range, title="line", width=300, height=300)
p2.square(x_range, y_values, size=7, line_alpha=0)
for i in range(len(err_xs)):
p2.line(err_xs[i], err_ys[i])
patch1_x = ['foo','bar','bar','foo']
patch1_y = [1,1,2,2]
patch2_x = ['bar','ting','bar','foo']
patch2_y = [2,2,4,4]
patch_list_x = [patch1_x, patch2_x]
patch_list_y = [patch1_y, patch2_y]
p3 = figure(x_range=['foo', 'bar', 'ting'], y_range=(0, 5), title="patches", width=300, height=300)
p3.patches(patch_list_x, patch_list_y)
p4 = figure(x_range=['foo', 'bar', 'ting'], y_range=(0, 5), title="patch", width=300, height=300)
p4.patch(patch1_x, patch1_y)
p4.patch(patch2_x, patch2_y)
show(gridplot([[p1, p2], [p3, p4]], merge_tools=False))
| bsd-3-clause | -1,479,721,902,649,406,000 | 27.139535 | 99 | 0.621488 | false |
awslabs/chalice | tests/codelinter.py | 1 | 2180 | # These are linting checks used in the chalice codebase itself.
# These are used to enforce specific coding standards and constraints.
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from astroid.exceptions import InferenceError
import astroid
def register(linter):
linter.register_checker(ConditionalImports(linter))
class ConditionalImports(BaseChecker):
# This is used to ensure that any imports that rely on conditional
# dependencies must be wrapped in a try/except ImportError.
__implements__ = (IAstroidChecker,)
name = 'must-catch-import-error'
msgs = {
'C9997': ('Importing this module must catch ImportError.',
'must-catch-import-error',
'Importing this module must catch ImportError.'),
}
def visit_import(self, node):
names = [name[0] for name in node.names]
if 'chalice.cli.filewatch.eventbased' in names:
if not self._is_in_try_except_import_error(node):
self.add_message('must-catch-import-error', node=node)
return
def visit_importfrom(self, node):
if node.modname == 'chalice.cli.filewatch.eventbased':
names = [name[0] for name in node.names]
if 'WatchdogWorkerProcess' in names:
# Ensure this is wrapped in a try/except.
# Technically we should ensure anywhere in the call stack
# we're wrapped in a try/except, but in practice we'll just
# enforce you did that in the same scope as your import.
if not self._is_in_try_except_import_error(node):
self.add_message('must-catch-import-error', node=node)
return
def _is_in_try_except_import_error(self, node):
if not isinstance(node.parent, astroid.TryExcept):
return False
caught_exceptions = [
handler.type.name for handler in node.parent.handlers]
if 'ImportError' not in caught_exceptions:
# They wrapped a try/except but aren't catching
# ImportError.
return False
return True
| apache-2.0 | -3,445,982,109,611,971,000 | 40.923077 | 75 | 0.638532 | false |
smarkets/marge-bot | tests/git_repo_mock.py | 1 | 8075 | import logging as log
from collections import defaultdict
from datetime import timedelta
import functools
import shlex
import marge.git as git
class RepoMock(git.Repo):
@classmethod
def init_for_merge_request(cls, merge_request, initial_target_sha, project, forked_project=None):
assert bool(forked_project) == (
merge_request.source_project_id != merge_request.target_project_id
)
target_url = project.ssh_url_to_repo
source_url = forked_project.ssh_url_to_repo if forked_project else target_url
remote_repos = defaultdict(GitRepoModel)
remote_repos[source_url].set_ref(merge_request.source_branch, merge_request.sha)
remote_repos[target_url].set_ref(merge_request.target_branch, initial_target_sha)
result = cls(
remote_url=target_url,
local_path='/tmp/blah',
ssh_key_file='/home/homer/.ssh/id_rsa',
timeout=timedelta(seconds=1000000),
reference='the_reference',
)
# pylint: disable=attribute-defined-outside-init
result.mock_impl = GitModel(origin=target_url, remote_repos=remote_repos)
return result
def git(self, *args, from_repo=True):
command = args[0]
command_args = args[1:]
log.info('Run: git %r %s', command, ' '.join(map(repr, command_args)))
assert from_repo == (command != 'clone')
command_impl_name = command.replace('-', '_')
command_impl = getattr(self.mock_impl, command_impl_name, None)
assert command_impl, ('git: Unexpected command %s' % command)
try:
result = command_impl(*command_args)
except Exception:
log.warning('Failed to simulate: git %r %s', command, command_args)
raise
else:
return self._pretend_result_comes_from_popen(result)
@staticmethod
def _pretend_result_comes_from_popen(result):
result_bytes = ('' if result is None else str(result)).encode('ascii')
return stub(stdout=result_bytes)
class stub: # pylint: disable=invalid-name,too-few-public-methods
def __init__(self, **kwargs):
self.__dict__ = kwargs
class GitRepoModel:
def __init__(self, copy_of=None):
# pylint: disable=protected-access
self._refs = dict(copy_of._refs) if copy_of else {}
def set_ref(self, ref, commit):
self._refs[ref] = commit
def get_ref(self, ref):
return self._refs[ref]
def has_ref(self, ref):
return ref in self._refs
def del_ref(self, ref):
self._refs.pop(ref, None)
def __repr__(self):
return "<%s: %s>" % (type(self), self._refs)
class GitModel:
def __init__(self, origin, remote_repos):
assert origin in remote_repos
self.remote_repos = remote_repos
self._local_repo = GitRepoModel()
self._remotes = dict(origin=origin)
self._remote_refs = {}
self._branch = None
self.on_push_callbacks = []
@property
def _head(self):
return self._local_repo.get_ref(self._branch)
def remote(self, *args):
action = args[0]
if action == 'rm':
_, remote = args
try:
self._remotes.pop(remote)
except KeyError as err:
raise git.GitError('No such remote: %s' % remote) from err
elif action == 'add':
_, remote, url = args
self._remotes[remote] = url
else:
assert False, args
def fetch(self, *args):
_, remote_name = args
assert args == ('--prune', remote_name)
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
self._remote_refs[remote_name] = GitRepoModel(copy_of=remote_repo)
def checkout(self, *args):
if args[0] == '-B': # -B == create if it doesn't exist
_, branch, start_point, _ = args
assert args == ('-B', branch, start_point, '--')
assert start_point == '' or '/' in start_point # '' when "local"
# create if it doesn't exist
if not self._local_repo.has_ref(branch):
if start_point:
remote_name, remote_branch = start_point.split('/')
assert remote_branch == branch
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
commit = remote_repo.get_ref(branch)
self._local_repo.set_ref(branch, commit)
else:
self._local_repo.set_ref(branch, self._head)
else:
branch, _ = args
assert args == (branch, '--')
assert self._local_repo.has_ref(branch)
# checkout
self._branch = branch
def branch(self, *args):
if args[0] == "-D":
_, branch = args
assert self._branch != branch
self._local_repo.del_ref(branch)
else:
assert False
def rev_parse(self, arg):
if arg == 'HEAD':
return self._head
remote, branch = arg.split('/')
return self._remote_refs[remote].get_ref(branch)
def rebase(self, arg):
remote, branch = arg.split('/')
new_base = self._remote_refs[remote].get_ref(branch)
if new_base != self._head:
new_sha = 'rebase(%s onto %s)' % (self._head, new_base)
self._local_repo.set_ref(self._branch, new_sha)
def merge(self, arg):
remote, branch = arg.split('/')
other_ref = self._remote_refs[remote].get_ref(branch)
if other_ref != self._head:
new_sha = 'merge(%s with %s)' % (self._head, other_ref)
self._local_repo.set_ref(self._branch, new_sha)
def push(self, *args):
force_flag, remote_name, refspec = args
assert force_flag in ('', '--force')
branch, remote_branch = refspec.split(':')
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
old_sha = remote_repo.get_ref(remote_branch)
new_sha = self._local_repo.get_ref(branch)
if force_flag:
remote_repo.set_ref(remote_branch, new_sha)
else:
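            # a non-forced push only succeeds if the remote still points at the sha we last fetched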
expected_remote_sha = self._remote_refs[remote_name].get_ref(remote_branch)
if old_sha != expected_remote_sha:
raise git.GitError("conflict: can't push")
remote_repo.set_ref(remote_branch, new_sha)
for callback in self.on_push_callbacks:
callback(
remote_url=remote_url,
remote_branch=remote_branch,
old_sha=old_sha,
new_sha=new_sha,
)
def config(self, *args):
assert len(args) == 2 and args[0] == '--get'
_, remote, _ = elems = args[1].split('.')
assert elems == ['remote', remote, 'url'], elems
return self._remotes[remote]
def diff_index(self, *args):
assert args == ('--quiet', 'HEAD')
# we don't model dirty index
def ls_files(self, *args):
assert args == ('--others',)
# we don't model untracked files
def filter_branch(self, *args):
_, _, filter_cmd, commit_range = args
assert args == ('--force', '--msg-filter', filter_cmd, commit_range)
trailers_var, python, script_path = shlex.split(filter_cmd)
_, trailers_str = trailers_var.split('=')
assert trailers_var == "TRAILERS=%s" % trailers_str
assert python == "python3"
assert script_path.endswith("marge/trailerfilter.py")
trailers = list(sorted(set(line.split(':')[0] for line in trailers_str.split('\n'))))
assert trailers
new_sha = functools.reduce(
lambda x, f: "add-%s(%s)" % (f, x),
[trailer.lower() for trailer in trailers],
self._head
)
self._local_repo.set_ref(self._branch, new_sha)
return new_sha
| bsd-3-clause | -5,253,496,412,043,105,000 | 32.367769 | 101 | 0.560991 | false |
seporaitis/mysqlparse | tests/grammar/test_sql_file.py | 1 | 1373 | # -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mysqlparse.grammar.sql_file import sql_file_syntax
class SqlFileSyntaxTest(unittest.TestCase):
def test_multiple_statements(self):
sql_file = sql_file_syntax.parseString("""
CREATE TABLE test_table1 (
test_column1 INT(11) PRIMARY KEY AUTO_INCREMENT NOT NULL,
test_column2 INT(11) NOT NULL
);
ALTER TABLE test_table2 ADD col_no0 BIT(8) NOT NULL DEFAULT 0 FIRST,
ADD col_no1 LONGTEXT NOT NULL,
ADD col_no2 VARCHAR(200) NULL,
ADD col_no3 BIT(8) AFTER col0;
CREATE TABLE test_table3 (
test_column INT(11) PRIMARY KEY AUTO_INCREMENT NOT NULL
);
ALTER TABLE test_table4 ADD col_no0 BIT(8) NOT NULL DEFAULT 0 FIRST,
ADD col_no1 LONGTEXT NOT NULL,
ADD col_no2 VARCHAR(200) NULL,
ADD col_no3 BIT(8) AFTER col0;
""")
self.assertEqual(len(sql_file.statements), 4)
self.assertEqual(sql_file.statements[0].table_name, 'test_table1')
self.assertEqual(sql_file.statements[1].table_name, 'test_table2')
self.assertEqual(sql_file.statements[2].table_name, 'test_table3')
self.assertEqual(sql_file.statements[3].table_name, 'test_table4')
| mit | -2,081,226,468,887,657,500 | 35.131579 | 82 | 0.641661 | false |
ph1l/halo_radio | WebRoot/Preferences.py | 1 | 2083 | import HaloRadio.TopWeb as TopWeb
import HaloRadio.StyleListMaker as StyleListMaker
import HaloRadio.Style as Style
class plugin(TopWeb.TopWeb):
def GetReqs(self):
return "amv"
def handler(self, context):
import HaloRadio.UserSongStatsListMaker as UserSongStatsListMaker
import HaloRadio.UserSongStats as UserSongStats
import HaloRadio.Song as Song
import HaloRadio.User as User
import HaloRadio.Exception as Exception
# Username
if self.form.has_key("id"):
userid = int(self.form['id'].value)
else:
userid = self.user.id
user = User.User(userid)
if (self.do_authorize(self.user.rights, "a")):
is_user=0
canmod_user=1
ro_user=0
canmod_rights=1
ro_rights=0
canmod_email=1
ro_email=0
canmod_passwd=1
ro_passwd=0
elif (self.do_authorize(self.user.rights, "m")):
is_user=0
canmod_user=0
ro_user=1
canmod_rights=0
ro_rights=1
canmod_email=1
ro_email=0
canmod_passwd=1
ro_passwd=0
else:
is_user=1
canmod_user=0
ro_user=1
canmod_rights=0
ro_rights=1
canmod_email=0
ro_email=1
canmod_passwd=1
ro_passwd=0
context.addGlobal ("is_user", is_user)
context.addGlobal ("canmod_user", canmod_user )
context.addGlobal ("ro_user", ro_user)
context.addGlobal ("canmod_rights", canmod_rights)
context.addGlobal ("ro_rights", ro_rights)
context.addGlobal ("canmod_email", canmod_email)
context.addGlobal ("ro_email", ro_email)
context.addGlobal ("canmod_passwd", canmod_passwd)
context.addGlobal ("ro_passwd", ro_passwd)
context.addGlobal ("userid", userid )
context.addGlobal ("username", user.name )
context.addGlobal ("email", user.email )
context.addGlobal ("rights", user.rights )
context.addGlobal ("createdate", user.create_time )
slm = StyleListMaker.StyleListMaker()
slm.GetAll()
styles = []
for styleid in slm.list:
style = Style.Style(styleid)
entity = {}
entity['style'] = style.GetName()
entity['id'] = style.GetId()
styles.append(entity)
context.addGlobal ("styles", styles )
| gpl-2.0 | -5,218,146,967,847,114,000 | 26.773333 | 67 | 0.68555 | false |
CommonsDev/dataserver | commons/migrations/0006_auto__chg_field_pertinence_comment.py | 1 | 7545 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Pertinence.comment'
db.alter_column(u'commons_pertinence', 'comment', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Pertinence.comment'
db.alter_column(u'commons_pertinence', 'comment', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
models = {
'commons.pertinence': {
'Meta': {'object_name': 'Pertinence'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'usage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.Usage']"})
},
'commons.prestation': {
'Meta': {'object_name': 'Prestation'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'modules'", 'symmetrical': 'False', 'through': "orm['commons.SelectedModules']", 'to': "orm['commons.PrestationModule']"}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'commons.prestationmodule': {
'Meta': {'object_name': 'PrestationModule'},
'commonsretribution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'commonsselected': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'prestation_module'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['projects.Project']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'providerretribution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'providersupport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'commons.selectedmodules': {
'Meta': {'object_name': 'SelectedModules'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modules': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.PrestationModule']"}),
'prestation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.Prestation']"})
},
'commons.usage': {
'Meta': {'object_name': 'Usage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['projects.Project']", 'through': "orm['commons.Pertinence']", 'symmetrical': 'False'})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.Place']", 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgress']", 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'projects.projectprogress': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectProgress'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'progress_range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgressRange']"})
},
u'projects.projectprogressrange': {
'Meta': {'object_name': 'ProjectProgressRange'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'scout.place': {
'Meta': {'object_name': 'Place'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['scout.PostalAddress']"}),
'geo': ('django.contrib.gis.db.models.fields.PointField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'scout.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['commons'] | agpl-3.0 | 8,106,612,999,738,941,000 | 70.188679 | 219 | 0.557323 | false |
edubecks/vaidecaronaorg | caronasbrasilapp/djangoapp/apps/caronasbrasil/migrations/0009_auto__del_field_caronamodel_date__add_field_caronamodel_from_datetime_.py | 1 | 3750 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'CaronaModel.date'
db.delete_column(u'caronasbrasil_caronamodel', 'date')
# Adding field 'CaronaModel.from_datetime'
db.add_column(u'caronasbrasil_caronamodel', 'from_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
# Adding field 'CaronaModel.to_datetime'
db.add_column(u'caronasbrasil_caronamodel', 'to_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Adding field 'CaronaModel.date'
db.add_column(u'caronasbrasil_caronamodel', 'date',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
# Deleting field 'CaronaModel.from_datetime'
db.delete_column(u'caronasbrasil_caronamodel', 'from_datetime')
# Deleting field 'CaronaModel.to_datetime'
db.delete_column(u'caronasbrasil_caronamodel', 'to_datetime')
models = {
u'caronasbrasil.caronagroupmodel': {
'Meta': {'object_name': 'CaronaGroupModel'},
'city1': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'city1_list': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'city1_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'city2': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'city2_list': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'city2_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'caronasbrasil.caronamodel': {
'Meta': {'object_name': 'CaronaModel'},
'destiny': ('django.db.models.fields.CharField', [], {'max_length': '33'}),
'fb_content': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'fb_post_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'from_datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_vagas': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'ofereco_procuro': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '33'}),
'to_datetime': ('django.db.models.fields.DateTimeField', [], {})
},
u'caronasbrasil.parsererrorsmodel': {
'Meta': {'object_name': 'ParserErrorsModel'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'fb_post_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['caronasbrasil'] | mit | 5,747,936,618,193,574,000 | 51.097222 | 118 | 0.574667 | false |
hveto/hveto | hveto/tests/test_triggers.py | 1 | 1896 | # -*- coding: utf-8 -*-
# Copyright (C) Joshua Smith (2016-)
#
# This file is part of the hveto python package.
#
# hveto is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hveto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hveto. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `hveto.triggers`
"""
import pytest
from astropy.table import Table
from gwpy.segments import (Segment, SegmentList)
from .. import triggers
AUX_FILES = {
'L1:GDS-CALIB_STRAIN': 'L1-GDS_CALIB_STRAIN_OMICRON-12345-67890.xml.gz',
'H1:SUS-BS_M1_MASTER_OUT_F2_DQ_0_DAC':
'H1-SUS_BS_M1_MASTER_OUT_F2_DQ_0_DAC-1126252143-22179.xml.gz',
}
def test_aux_channels_from_cache():
cache = list(AUX_FILES.values())
channels = triggers.find_auxiliary_channels(
'omicron', None, None, cache=cache)
assert channels == sorted(AUX_FILES.keys())
channels = triggers.find_auxiliary_channels(
'omicron', None, None, cache=cache)
assert channels == sorted(AUX_FILES.keys())
def test_get_triggers():
# test that trigfind raises a warning if the channel-level directory
# doesn't exist
with pytest.warns(UserWarning):
out = triggers.get_triggers('X1:DOES_NOT_EXIST', 'omicron',
SegmentList([Segment(0, 100)]))
# check output type and columns
assert isinstance(out, Table)
for col in ['time', 'frequency', 'snr']:
assert col in out.dtype.names
| gpl-3.0 | 4,191,869,665,966,519,000 | 32.263158 | 76 | 0.687236 | false |
davidecaminati/Handcart-lift-rotary | Python/facedetect_mio.py | 1 | 3991 | #!/usr/bin/env python
import numpy as np
import cv2
import cv2.cv as cv
from multiprocessing.pool import ThreadPool
from video import create_capture
from common import clock, draw_str
from pyfirmata import Arduino, util
ArduinoPresent = False
if ArduinoPresent :
board = Arduino('/dev/ttyACM0')
#board.digital[2].write(1)
#board.digital[4].write(1)
help_message = '''
USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
minsize_occhi = 60
def rotateImage(image, angle):
row,col = image.shape
center=tuple(np.array([row,col])/2)
rot_mat = cv2.getRotationMatrix2D(center,angle,1.0)
new_image = cv2.warpAffine(image, rot_mat, (col,row))
return new_image
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.2, minNeighbors=4, minSize=(minsize_occhi, minsize_occhi), flags = cv.CV_HAAR_SCALE_IMAGE)
if len(rects) == 0:
return []
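    # detectMultiScale returns rectangles as (x, y, w, h); convert them to (x1, y1, x2, y2) corners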
rects[:,2:] += rects[:,:2]
return rects
def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
if __name__ == '__main__':
import sys, getopt
#print help_message
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try: video_src = video_src[0]
except: video_src = 0
args = dict(args)
#cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
#nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_eye.xml")
#nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade = cv2.CascadeClassifier(cascade_fn)
#nested = cv2.CascadeClassifier(nested_fn)
cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
numero = 0
while True:
ret, img = cam.read()
#gray = img[200:400,100:400]
#gray = img[100:300,100:300]
gray = img[100:400,100:500]
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
t = clock()
rects = detect(gray, cascade)
vis = gray.copy()
draw_rects(vis, rects, (0, 255, 0))
if ArduinoPresent:
board.digital[4].write(0)
board.digital[2].write(0)
for x1, y1, x2, y2 in rects:
#roi = gray[y1:y2, x1:x2]
#vis_roi = vis[y1:y2, x1:x2]
numero = numero + 1
larghezza = x2-x1
altezza = y2-y1
'''
if x1 >= 150: #dx
if ArduinoPresent:
board.digital[2].write(1)
dx = cv2.getRectSubPix(vis, (larghezza, altezza),(x1+larghezza/2,y1+altezza/2))
cv2.imshow('dx', dx)
'''
if ArduinoPresent:
board.digital[4].write(1)
sx = cv2.getRectSubPix(vis, (larghezza, altezza),(x1+larghezza/2,y1+altezza/2))
#edges = cv2.Canny(sx,100,300)
#cv2.imshow('sx', edges)
cv2.imshow('sx', sx)
#file = "/home/pi/opencv-2.4.10/samples/python2/occhi/test_image" + str(numero) + ".png"
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide. Convenient!
#cv2.imwrite(file, sx)
#subrects = detect(roi.copy(), nested)
#draw_rects(vis_roi, subrects, (255, 0, 0))
dt = clock() - t
draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
cv2.imshow('facedetect', vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
| gpl-2.0 | -5,949,255,796,883,102,000 | 29.7 | 146 | 0.560261 | false |
peoplepower/composer-sdk-python | com.ppc.Microservices/intelligence/data_request/tools/download_data.py | 1 | 5155 | #!/usr/bin/env python
# encoding: utf-8
'''
Created on January 4, 2019
@author: David Moss
'''
# Data Stream Address
DATASTREAM_ADDRESS = "download_data"
# Data Stream Content
DATASTREAM_CONTENT = {
"force": True
}
# input function behaves differently in Python 2.x and 3.x. And there is no raw_input in 3.x.
if hasattr(__builtins__, 'raw_input'):
input=raw_input
import requests
import sys
import json
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-u", "--username", dest="username", help="Username")
parser.add_argument("-p", "--password", dest="password", help="Password")
parser.add_argument("-s", "--server", dest="server", help="Base server URL (app.presencepro.com)")
parser.add_argument("-a", "--api_key", dest="apikey", help="User's API key instead of a username/password")
parser.add_argument("--httpdebug", dest="httpdebug", action="store_true", help="HTTP debug logger output");
# Process arguments
args = parser.parse_args()
# Extract the arguments
username = args.username
password = args.password
server = args.server
httpdebug = args.httpdebug
app_key = args.apikey
# Define the bot server
if not server:
server = "https://app.presencepro.com"
if "http" not in server:
server = "https://" + server
# HTTP Debugging
if httpdebug:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Login to your user account
if app_key is None:
app_key, user_info = _login(server, username, password)
send_datastream_message(server, app_key, DATASTREAM_ADDRESS, DATASTREAM_CONTENT)
print("Done!")
def send_datastream_message(server, app_key, address, content):
http_headers = {"API_KEY": app_key, "Content-Type": "application/json"}
params = {
"address": address,
"organizational": 1
}
body = {
"feed": content
}
print("Body: " + json.dumps(body, indent=2, sort_keys=True))
print("Server: " + server)
r = requests.post(server + "/cloud/appstore/stream/", params=params, data=json.dumps(body), headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
print(str(r.text))
def _login(server, username, password):
"""Get an Bot API key and User Info by login with a username and password"""
if not username:
username = input('Email address: ')
if not password:
import getpass
password = getpass.getpass('Password: ')
try:
import requests
# login by username and password
http_headers = {"PASSWORD": password, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/login", params={"username":username}, headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
app_key = j['key']
# get user info
http_headers = {"PRESENCE_API_KEY": app_key, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/user", headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
return app_key, j
except BotError as e:
sys.stderr.write("Error: " + e.msg)
sys.stderr.write("\nCreate an account on " + server + " and use it to sign in")
sys.stderr.write("\n\n")
raise e
def _check_for_errors(json_response):
"""Check some JSON response for BotEngine errors"""
if not json_response:
raise BotError("No response from the server!", -1)
if json_response['resultCode'] > 0:
msg = "Unknown error!"
if 'resultCodeMessage' in json_response.keys():
msg = json_response['resultCodeMessage']
elif 'resultCodeDesc' in json_response.keys():
msg = json_response['resultCodeDesc']
raise BotError(msg, json_response['resultCode'])
del(json_response['resultCode'])
class BotError(Exception):
"""BotEngine exception to raise and log errors."""
def __init__(self, msg, code):
super(BotError).__init__(type(self))
self.msg = msg
self.code = code
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -2,137,572,307,962,109,000 | 26.275132 | 117 | 0.60582 | false |
eonpatapon/lollypop | src/database_upgrade.py | 1 | 1934 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lollypop.sqlcursor import SqlCursor
class DatabaseUpgrade:
"""
Manage database schema upgrades
"""
def __init__(self, version, db):
"""
Init object
@param version as int
@param db as Database
"""
self._version = version
self._db = db
        # Schema upgrades: the key is the database version,
        # the value is the SQL request to run
self._UPGRADES = {
1: "update tracks set duration=CAST(duration as INTEGER);",
2: "update albums set artist_id=-2001 where artist_id=-999;"
}
"""
Return upgrade count
@return int
"""
def count(self):
return len(self._UPGRADES)
"""
Upgrade database based on version
@return new db version as int
"""
def do_db_upgrade(self):
with SqlCursor(self._db) as sql:
for i in range(self._version+1, len(self._UPGRADES)+1):
try:
sql.execute(self._UPGRADES[i])
except Exception as e:
print("Database upgrade failed: ", e)
sql.commit()
return len(self._UPGRADES)
| gpl-3.0 | 8,553,216,954,296,847,000 | 33.535714 | 76 | 0.61272 | false |
Linaro/squad | squad/frontend/comparison.py | 1 | 4547 | from functools import reduce
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from django.db.models import Q, Prefetch
from squad.core.models import Project, Group, Build
from squad.core.comparison import TestComparison, MetricComparison
from squad.frontend.utils import alphanum_sort
RESULT_STATES = ['pass', 'fail', 'xfail', 'skip', 'n/a']
TRANSITIONS = {(_from, _to): False for _from in RESULT_STATES for _to in RESULT_STATES}
DEFAULT_CHECKED_TRANSITIONS = [('pass', 'fail'), ('fail', 'pass')]
def __get_comparison_class(comparison_type):
if 'metric' == comparison_type:
return MetricComparison
else:
return TestComparison
def __paginate(results, request):
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
paginator = Paginator(tuple(results.items()), 50)
return paginator.page(page)
def __get_transitions(request):
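    # parse "from:to" transition filters from the query string; "ignore" disables filtering, the default is pass<->fail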
transitions = TRANSITIONS.copy()
marked_transitions = request.GET.getlist('transitions', [])
if 'ignore' in marked_transitions:
return {}
if len(marked_transitions) > 0:
for t in marked_transitions:
if t is None or t == 'None':
continue
_from, _to = t.split(':')
if _from in RESULT_STATES and _to in RESULT_STATES:
transitions[(_from, _to)] = True
else:
for default in DEFAULT_CHECKED_TRANSITIONS:
transitions[(default[0], default[1])] = True
return transitions
def compare_projects(request):
comparison = None
group = None
projects = None
comparison_type = request.GET.get('comparison_type', 'test')
transitions = __get_transitions(request)
group_slug = request.GET.get('group')
if group_slug:
group = get_object_or_404(Group, slug=group_slug)
qs = group.projects.accessible_to(request.user).prefetch_related(
Prefetch('builds', queryset=Build.objects.order_by('-datetime'))
)
projects = alphanum_sort(qs, 'slug')
filters = []
for key, value in request.GET.items():
if 'project_' in key and len(key.split('_')) == 2:
project_id = key.split('_')[1]
filters.append(Q(project_id=project_id) & Q(version=value))
if len(filters) > 1:
build_filters = reduce(lambda x, y: x | y, filters)
builds = Build.objects.filter(build_filters)
comparison_class = __get_comparison_class(comparison_type)
comparison = comparison_class.compare_builds(*builds)
if comparison_type == 'test' and len(transitions):
comparison.apply_transitions([t for t, checked in transitions.items() if checked])
comparison.results = __paginate(comparison.results, request)
context = {
'group': group,
'projects': projects,
'comparison': comparison,
'comparison_type': comparison_type,
'transitions': transitions,
}
return render(request, 'squad/compare_projects.jinja2', context)
def compare_test(request):
context = {}
return render(request, 'squad/compare.jinja2', context)
def compare_builds(request):
project_slug = request.GET.get('project')
comparison_type = request.GET.get('comparison_type', 'test')
transitions = __get_transitions(request)
comparison = None
project = None
if project_slug:
group_slug, project_slug = project_slug.split('/')
project = get_object_or_404(Project, group__slug=group_slug, slug=project_slug)
baseline_build = request.GET.get('baseline')
target_build = request.GET.get('target')
if baseline_build and target_build:
baseline = get_object_or_404(project.builds, version=baseline_build)
target = get_object_or_404(project.builds, version=target_build)
comparison_class = __get_comparison_class(comparison_type)
comparison = comparison_class.compare_builds(baseline, target)
if comparison_type == 'test' and len(transitions):
comparison.apply_transitions([t for t, checked in transitions.items() if checked])
comparison.results = __paginate(comparison.results, request)
context = {
'project': project,
'comparison': comparison,
'comparison_type': comparison_type,
'transitions': transitions,
}
return render(request, 'squad/compare_builds.jinja2', context)
| agpl-3.0 | -3,463,290,576,986,732,000 | 32.932836 | 98 | 0.635144 | false |
harshavardhana/minio-py | tests/unit/minio_test.py | 1 | 3865 | # -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from unittest import TestCase
from nose.tools import raises, eq_
from minio import Minio, __version__
from minio.api import _DEFAULT_USER_AGENT
from minio.error import InvalidEndpointError, InvalidBucketError
from minio.helpers import get_target_url, is_valid_bucket_name
class ValidBucketName(TestCase):
@raises(InvalidBucketError)
def test_bucket_name(self):
is_valid_bucket_name('bucketName')
@raises(InvalidBucketError)
def test_bucket_name_invalid_characters(self):
is_valid_bucket_name('$$$bcuket')
@raises(InvalidBucketError)
def test_bucket_name_length(self):
is_valid_bucket_name('dd')
@raises(InvalidBucketError)
def test_bucket_name_periods(self):
is_valid_bucket_name('dd..mybucket')
@raises(InvalidBucketError)
def test_bucket_name_begins_period(self):
is_valid_bucket_name('.ddmybucket')
class GetURLTests(TestCase):
def test_get_target_url_works(self):
url = 'http://localhost:9000'
eq_(get_target_url(url, 'bucket-name'),
'http://localhost:9000/bucket-name/')
eq_(get_target_url(url, 'bucket-name', 'objectName'),
'http://localhost:9000/bucket-name/objectName')
eq_(get_target_url(url, 'bucket-name', 'objectName', None),
'http://localhost:9000/bucket-name/objectName')
eq_(get_target_url(url, 'bucket-name', 'objectName', 'us-east-1',
{'foo': 'bar'}),
'http://localhost:9000/bucket-name/objectName?foo=bar')
eq_(get_target_url(url, 'bucket-name', 'objectName', 'us-east-1',
{'foo': 'bar',
'b': 'c',
'a': 'b'}),
'http://localhost:9000/bucket-name/objectName?a=b&b=c&foo=bar')
# S3 urls.
s3_url = 'https://s3.amazonaws.com'
eq_(get_target_url(s3_url), 'https://s3.amazonaws.com/')
eq_(get_target_url(s3_url, 'my.bucket.name'),
'https://s3.amazonaws.com/my.bucket.name/')
eq_(get_target_url(s3_url,
'bucket-name',
'objectName',
'us-west-2', None),
'https://bucket-name.s3-us-west-2.amazonaws.com/objectName')
@raises(TypeError)
def test_minio_requires_string(self):
Minio(10)
@raises(InvalidEndpointError)
def test_minio_requires_hostname(self):
Minio('http://')
class UserAgentTests(TestCase):
def test_default_user_agent(self):
client = Minio('localhost')
eq_(client._user_agent, _DEFAULT_USER_AGENT)
def test_set_app_info(self):
client = Minio('localhost')
expected_user_agent = _DEFAULT_USER_AGENT + ' hello/1.0.2'
client.set_app_info('hello', '1.0.2')
eq_(client._user_agent, expected_user_agent)
@raises(ValueError)
def test_set_app_info_requires_non_empty_name(self):
client = Minio('localhost:9000')
client.set_app_info('', '1.0.2')
@raises(ValueError)
def test_set_app_info_requires_non_empty_version(self):
client = Minio('localhost:9000')
client.set_app_info('hello', '')
| apache-2.0 | -1,125,535,915,757,430,300 | 36.892157 | 83 | 0.62458 | false |
EnviroCentre/jython-upgrade | jython/lib/site-packages/pip/download.py | 1 | 26290 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simple return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL(
"Invalid URL %r: Only localhost is allowed" %
request.url
)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
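        # synthesize HTTP-style headers from the file's metadata on disk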
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop('retries', None)
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Configure retries
if retries:
http_adapter = requests.adapters.HTTPAdapter(max_retries=retries)
self.mount("http://", http_adapter)
self.mount("https://", http_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
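# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The path below is hypothetical. It shows how the retry, timeout,
# and file:// handling configured above fit together.
def _example_pip_session_usage():
    session = PipSession(retries=3)   # retries are mounted on http:// and https://
    session.timeout = 15              # default timeout applied in request()
    # file:// URLs get a "localhost" netloc injected and are served by
    # LocalFSAdapter instead of going over the network.
    resp = session.get("file:///tmp/example/Packages")  # hypothetical path
    resp.raise_for_status()
    return resp.content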
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
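# Added illustration: for ordinary absolute paths the two helpers above are
# inverses of each other (the values below are examples, not from the original):
#
#   >>> path_to_url('/tmp/pkg')
#   'file:///tmp/pkg'
#   >>> url_to_path('file:///tmp/pkg')
#   '/tmp/pkg'
#
# On Windows a drive letter survives the round trip with separators
# normalized, e.g. 'C:\\pkgs' -> 'file:///C:/pkgs' -> 'C:/pkgs'.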
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = (
'.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle', '.whl'
)
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal(
"Hash digest size of the package %d (%s) doesn't match the "
"expected hash name %s!" %
(download_hash.digest_size, link, link.hash_name)
)
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal(
"Hash of the package %s (%s) doesn't match the expected hash %s!" %
(link, download_hash.hexdigest(), link.hash)
)
raise HashMismatch(
'Bad %s hash for package %s' % (link.hash_name, link)
)
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn(
"Unsupported hash name %s for package %s" % (link.hash_name, link)
)
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn(
"Unsupported hash name %s for package %s" %
(link.hash_name, link)
)
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
# FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress(
'Downloading %s (%s): ' %
(show_url, format_size(total_length))
)
else:
logger.start_progress(
'Downloading %s (unknown size): ' % show_url
)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we do
                            # not want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress(
'%3i%% %s' %
(
100 * downloaded / total_length,
format_size(downloaded)
)
)
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn(
'Backing up %s to %s' %
(display_path(download_location), display_path(dest_file))
)
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(
download_cache,
urllib.quote(target_url, '')
)
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
# If already downloaded, does its hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
                # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
| mit | -5,345,007,701,288,024,000 | 35.0631 | 79 | 0.568581 | false |
cartologic/cartoview | cartoview/apps_handler/config.py | 1 | 4552 | # -*- coding: utf-8 -*-
import json
import os
from collections import Mapping
import portalocker
class AppsDict(Mapping):
def __init__(self, *args, **kw):
self._app_data = dict(*args, **kw)
def __setitem__(self, key, item):
self._app_data[key] = item
self.__sort_apps()
def __getitem__(self, key):
return self._app_data[key]
def __repr__(self):
return repr(self._app_data)
def __len__(self):
return len(self._app_data)
def __delitem__(self, key):
del self._app_data[key]
def clear(self):
return self._app_data.clear()
def copy(self):
return self._app_data.copy()
def has_key(self, k):
return k in self._app_data
def update(self, *args, **kwargs):
self._app_data.update(*args, **kwargs)
self.__sort_apps()
def keys(self):
return self._app_data.keys()
def values(self):
return self._app_data.values()
def items(self):
return self._app_data.items()
def pop(self, *args):
return self._app_data.pop(*args)
def __cmp__(self, dict_):
        # delegate comparison to the wrapped dict
        return cmp(self._app_data, dict_)
def __contains__(self, item):
return item in self._app_data
def __iter__(self):
return iter(self._app_data)
def __unicode__(self):
return str(repr(self._app_data))
def __sort_apps(self):
self._app_data = dict(
sorted(self._app_data.items(), key=lambda item: item[1].order))
def to_json(self):
data = {k: v.to_dict() for k, v in self._app_data.items()}
return json.dumps(data, indent=4, sort_keys=True)
def from_json(self, data):
def cartoview_app_dict(name, data):
d = {'name': name}
d.update(data)
return d
try:
apps = json.loads(data)
self._app_data = {
k: CartoviewApp(cartoview_app_dict(k, v))
for k, v in apps.items()
}
self.__sort_apps()
return self._app_data
except BaseException:
return AppsDict()
def get_active_apps(self):
return {k: v for k, v in self._app_data.items() if v.active}
def get_pending_apps(self):
return {k: v for k, v in self._app_data.items() if v.pending}
def app_exists(self, app_name):
return self._app_data.get(app_name, None)
class CartoviewApp(object):
app_attrs = frozenset(['name', 'active', 'pending', 'order'])
objects = AppsDict()
apps_dir = None
def __init__(self, data):
        if not data or not isinstance(data, dict):
raise ValueError("data must be dict type")
for k, v in data.items():
setattr(self, k, v)
self._validate()
self.cleanup()
self.commit()
def _validate(self):
for attr in CartoviewApp.app_attrs:
if not hasattr(self, attr):
raise ValueError('attr {} not found'.format(attr))
def cleanup(self):
        # iterate over a copy of the keys so attributes can be deleted while looping
        for attr in list(vars(self).keys()):
            if (attr not in ('objects', 'app_attrs')
                    and attr not in CartoviewApp.app_attrs
                    and not attr.startswith('_')):
                delattr(self, attr)
def __setattr__(self, name, value):
        if name in ('objects', 'app_attrs'):
            raise ValueError(
                "{} should be altered using the class name".format(name))
if name not in CartoviewApp.app_attrs:
raise AttributeError("attribute '{}' not found ".format(name))
super(CartoviewApp, self).__setattr__(name, value)
def to_dict(self):
return {
k: getattr(self, k)
for k in CartoviewApp.app_attrs if k != 'name'
}
@classmethod
def get_apps_json_path(cls):
return os.path.join(cls.apps_dir, 'apps.json')
def commit(self):
CartoviewApp.objects.update({self.name: self})
return self
@classmethod
def load(cls):
if os.path.exists(cls.get_apps_json_path()):
with portalocker.Lock(
cls.get_apps_json_path(), 'r',
portalocker.LOCK_EX) as jf:
data = jf.read()
CartoviewApp.objects.from_json(data)
@classmethod
def save(cls):
with portalocker.Lock(
cls.get_apps_json_path(), 'w',
portalocker.LOCK_EX) as jf:
data = CartoviewApp.objects.to_json()
jf.write(data)
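# Illustrative usage sketch (added; not part of the original module). The app
# name and directory below are made up.
def _example_cartoview_app_usage(apps_dir):
    CartoviewApp.apps_dir = apps_dir      # directory that holds apps.json
    CartoviewApp({'name': 'hypothetical_viewer',
                  'active': True, 'pending': False, 'order': 1})
    CartoviewApp.save()   # serializes CartoviewApp.objects under a file lock
    CartoviewApp.load()   # re-reads apps.json back into CartoviewApp.objects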
| bsd-2-clause | -8,725,916,909,306,204,000 | 26.92638 | 75 | 0.541301 | false |
sevenian3/ChromaStarPy | solartest.py | 1 | 6462 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
dataPath = "SolFluxAtlas2005/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "fluxspliced.2005"
with open(inFile, 'r') as inputHandle:
    #Expects the number of records on the first line, then white-space-delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #Special one-line header
print(inLine)
fields = inLine.split()
numStr = fields[0].strip() #first field is number of following records
num = int(numStr)
waveSun = [0.0 for i in range(num)]
fluxSun = [0.0 for i in range(num)]
for i in range(num):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
waveSun[i] = float(wavStr); fluxSun[i] = float(flxStr)
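#Added note: fluxspliced.2005 is expected to look like (values made up)
#   21000
#   299.90  0.9987
#   299.91  0.9991
#   ...
#with the record count alone on the first line, as parsed above.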
pylab.plot(waveSun, fluxSun, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "5777.0"
loggStr = "4.44"
logZStr = "0.0"
massStarStr = "1.0"
xiTStr = "1.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.0"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "2.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
project = "Project"
runVers = "Run"
teff = 5777.0
logg = 4.44
log10ZScale = 0.0
lambdaStart = 390.0
lambdaStop = 400.0
fileStem = project + "-"\
+ str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
+ "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
+ "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
    #Header lines give the number of line IDs and the number of wavelengths;
    #white-space-delimited columns of wavelength (nm) and rectified flux follow
inLine = inputHandle.readline() #line of header
print(inLine)
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of line IDs is last field:
numLineIdsStr = fields[len(fields)-1]
numLineIds = int(numLineIdsStr) - 1 # to be on safe side
print("Recovered that there are " + numLineIdsStr + " lines to ID")
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of wavelengths in spectrum is last field:
numWavsStr = fields[len(fields)-1]
numWavs = int(numWavsStr) # to be on safe side
print("Recovered that there are " + numWavsStr + " wavelengths")
#One more line of header
inLine = inputHandle.readline() #line of header
print(inLine)
waveMod = [0.0 for i in range(numWavs)]
fluxMod = [0.0 for i in range(numWavs)]
#Get the synthetic spectrum
for i in range(numWavs):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod[i] = wav
fluxMod[i] = float(flxStr)
waveIds = [0.0 for i in range(numLineIds)]
lblIds = ["" for i in range(numLineIds)]
#Get the line IDs
#Expects four white-space-delimited fields:
# wavelength, element, ion. stage, and rounded wavelength
#Another line of header for line id section
inLine = inputHandle.readline() #line of header
print(inLine)
for i in range(numLineIds):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip()
wav = invnAir * float(wavStr)
waveIds[i] = wav
lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
inLine = inputHandle.readline()
if not inLine:
break
#print(inLine)
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod.append(wav)
fluxMod.append(float(flxStr))
"""
#plot the spectrum
#plt.title('Synthetic spectrum')
plt.ylabel('$F_\lambda/F^C_\lambda$')
plt.xlabel('$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs
for i in range(numLineIds):
if "Ca II" in lblIds[i]:
thisLam = waveIds[i]
thisLbl = lblIds[i]
xPoint = [thisLam, thisLam]
yPoint = [1.05, 1.1]
pylab.plot(xPoint, yPoint, color='black')
pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000) | mit | 3,639,136,743,345,214,000 | 28.780952 | 106 | 0.604302 | false |
lunixbochs/uberserver | server.py | 1 | 4578 | #!/usr/bin/env python
# coding=utf-8
import thread, traceback, signal, socket, sys
from urllib import urlopen
from DataHandler import DataHandler
from Client import Client
from NATServer import NATServer
from Dispatcher import Dispatcher
import ip2country # just to make sure it's downloaded
import ChanServ
_root = DataHandler()
_root.parseArgv(sys.argv)
try:
signal.SIGHUP
def sighup(sig, frame):
_root.console_write('Received SIGHUP.')
if _root.sighup:
_root.reload()
signal.signal(signal.SIGHUP, sighup)
except AttributeError:
pass
_root.console_write('-'*40)
_root.console_write('Starting uberserver...\n')
host = ''
port = _root.port
natport = _root.natport
backlog = 100
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR,
server.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 )
# fixes TIME_WAIT :D
server.bind((host,port))
server.listen(backlog)
try:
natserver = NATServer(natport)
thread.start_new_thread(natserver.start,())
natserver.bind(_root)
except socket.error:
print 'Error: Could not start NAT server - hole punching will be unavailable.'
_root.console_write()
_root.console_write('Detecting local IP:')
try: local_addr = socket.gethostbyname(socket.gethostname())
except: local_addr = '127.0.0.1'
_root.console_write(local_addr)
_root.console_write('Detecting online IP:')
try:
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(5)
web_addr = urlopen('http://automation.whatismyip.com/n09230945.asp').read()
socket.setdefaulttimeout(timeout)
_root.console_write(web_addr)
except:
web_addr = local_addr
_root.console_write('not online')
_root.console_write()
_root.local_ip = local_addr
_root.online_ip = web_addr
_root.console_write('Listening for clients on port %i'%port)
_root.console_write('Using %i client handling thread(s).'%_root.max_threads)
dispatcher = Dispatcher(_root, server)
_root.dispatcher = dispatcher
chanserv = True
if chanserv:
address = ((web_addr or local_addr), 0)
chanserv = ChanServ.ChanServClient(_root, address, _root.session_id)
dispatcher.addClient(chanserv)
_root.chanserv = chanserv
try:
dispatcher.pump()
except KeyboardInterrupt:
_root.console_write()
_root.console_write('Server killed by keyboard interrupt.')
except:
_root.error(traceback.format_exc())
_root.console_write('Deep error, exiting...')
# _root.console_write('Killing handlers.')
# for handler in _root.clienthandlers:
# handler.running = False
_root.console_write('Killing clients.')
for client in dict(_root.clients):
try:
conn = _root.clients[client].conn
if conn: conn.close()
except: pass # for good measure
server.close()
_root.running = False
_root.console_print_step()
if _root.dbtype == 'legacy':
print 'Writing account database to file...'
try:
while True:
try:
_root.userdb.writeAccounts()
print 'Accounts written.'
if _root.channelfile:
print 'Writing channels...'
__import__('tasserver').LegacyChannels.Writer().dump(_root.channels, _root.getUserDB().clientFromID)
print 'Channels written.'
_root.channelfile = None
break
except KeyboardInterrupt:
print 'You probably shouldn\'t interrupt this, starting account dump over.'
except:
print '-'*60
print traceback.format_exc()
print '-'*60
memdebug = False
if memdebug:
recursion = []
names = {}
def dump(obj, tabs=''):
if obj in recursion: return str(obj)
else: recursion.append(obj)
try:
			if type(obj) in (list, set):
return [dump(var) for var in obj]
elif type(obj) in (str, unicode, int, float):
return obj
elif type(obj) == dict:
output = {}
for key in obj:
output[key] = dump(obj[key], tabs+'\t')
else:
output = {}
ovars = vars(obj)
for key in ovars:
if key in names: names[key] += 1
else: names[key] = 1
output[key] = dump(ovars[key], tabs+'\t')
return '\n'.join(['%s%s:\n%s\t%s' % (tabs, key, tabs, output[key]) for key in output]) if output else {}
except: return 'no __dict__'
print 'Dumping memleak info.'
f = open('dump.txt', 'w')
f.write(dump(_root))
f.close()
counts = {}
for name in names:
count = names[name]
if count in counts:
counts[count].append(name)
else:
counts[count] = [name]
f = open('counts.txt', 'w')
for key in reversed(sorted(counts)):
f.write('%s: %s\n' % (key, counts[key]))
f.close() | mit | -7,224,281,884,972,797,000 | 24.783626 | 107 | 0.669506 | false |
zooko/egtp_new | egtp/CommHints.py | 1 | 2565 | # Copyright (c) 2001 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: CommHints.py,v 1.2 2002/12/02 19:58:44 myers_carpenter Exp $"
### standard modules
import types
# The following hints can be passed to `send_msg()' to allow the comms handler to optimize
# usage of the underlying communication system. A correct comms handler implementation
# could, of course, ignore these hints, and the comms handler should not fail to send a
# message, send it to the wrong counterparty, or otherwise do something incorrect no matter
# what hints are passed.
# This hint means that you expect an immediate response. For example, the TCPCommsHandler
# holds the connection open after sending until it gets a message on that connection, then
# closes it. (Unless HINT_EXPECT_MORE_TRANSACTIONS is also passed, in which case see
# below.)
HINT_EXPECT_RESPONSE = 1
# This hint means that you expect to send and receive messages with this counterparty in the
# near future. (Who knows what that means? This is just a hint.) For example, the
# TCPCommsHandler holds the connection open after sending unless it has too many open
# connections, in which case it closes it.
HINT_EXPECT_MORE_TRANSACTIONS = 2
# For example, if both HINT_EXPECT_RESPONSE and HINT_EXPECT_MORE_TRANSACTIONS are passed,
# then the TCPCommsHandler holds the connection open until it receives a message on that
# connection, then reverts to HINT_EXPECT_MORE_TRANSACTIONS -style mode in which it keeps
# the connection open unless it has too many open connections.
# This hint means that you expect no more messages to or from this counterparty. For
# example, the TCPCommsHandler closes the connection immediately after sending the message.
# If you pass both HINT_EXPECT_NO_MORE_COMMS and one of the previous hints then you are
# silly.
HINT_EXPECT_NO_MORE_COMMS = 4
# This hint means that you are going to send something. For example, the TCPCommsHandler
# holds open a connection after it receives a query and then closed it after sending the reply.
HINT_EXPECT_TO_RESPOND = 8
# This hint, when passed with a call to `send()' indicates that the message is a response to an
# earlier received query.
HINT_THIS_IS_A_RESPONSE = 16
HINT_NO_HINT = 0
def is_hint(thingie, IntType=types.IntType, LongType=types.LongType):
if not type(thingie) in (IntType, LongType,):
return 0 # `false'
return (thingie >= 0) and (thingie < 32)
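# Illustrative sketch (added; not part of the original module): the hints above
# are plain bit flags, so callers OR them together and comms handlers test them
# with a bitwise AND. `send_msg` below is a hypothetical comms-handler callable.
def _example_hint_usage(send_msg, msg):
    hints = HINT_EXPECT_RESPONSE | HINT_EXPECT_MORE_TRANSACTIONS
    assert is_hint(hints)
    send_msg(msg, hints)
    if hints & HINT_EXPECT_RESPONSE:
        # e.g. a TCP comms handler would hold the connection open here until a
        # reply arrives, per the comments above
        pass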
| lgpl-2.1 | 629,575,291,455,173,500 | 46.5 | 95 | 0.759454 | false |
SatAgro/ecoclima | ecoclima_parser/init_all.py | 1 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
def init_all(db_name, user, host, password):
try:
conn = psycopg2.connect("dbname='" + db_name + "' user='" + user +
"' host='" + host + "' password='" + password +
"'")
cur = conn.cursor()
cur.execute("""DROP TABLE IF EXISTS """ + 'stations')
cur.execute("""CREATE TABLE """ + 'stations' +
"""(id Serial, name Text,
lat REAL, lon REAL, owner TEXT, url TEXT)""")
cur.execute("""DROP TABLE IF EXISTS """ + 'measures')
cur.execute("""CREATE TABLE """ + 'measures' +
"""(
station_id INTEGER,
m_date DATE,
m_time TIME,
temp_out REAL,
hi_temp REAL,
low_temp REAL,
out_hum INTEGER,
dew_pt REAL,
wind_speed REAL,
wind_dir TEXT,
wind_run REAL,
hi_speed REAL,
hi_dir TEXT,
wind_chill REAL,
heat_index REAL,
thw_index REAL,
bar REAL,
rain REAL,
rain_rate REAL,
uv_index REAL,
uv_dose REAL,
hi_uv REAL,
heat_dd REAL,
cool_dd REAL,
in_temp REAL,
in_hum INTEGER,
in_dew REAL,
in_heat REAL,
in_emc REAL,
in_air_density REAL,
soil_moist INTEGER,
soil_temp REAL,
leaf_wet INTEGER,
wind_samp REAL,
wind_tx INTEGER,
iss_recept REAL,
arc_int INTEGER,
CONSTRAINT """ +
"""station_time_unique UNIQUE (station_id, m_date, m_time))""")
cur.close()
conn.commit()
conn.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
raise
if __name__ == '__main__':
init_all(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
print ("tables for stations and measures have been created")
| lgpl-3.0 | -574,160,321,714,270,700 | 34.774648 | 83 | 0.385827 | false |
ferdisdot/elbe | elbepack/pkgutils.py | 1 | 7605 | # ELBE - Debian Based Embedded Rootfilesystem Builder
# Copyright (C) 2013 Linutronix GmbH
#
# This file is part of ELBE.
#
# ELBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ELBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ELBE. If not, see <http://www.gnu.org/licenses/>.
import os
from tempfile import mkdtemp
import urllib2
import hashlib
from elbepack.shellhelper import CommandError
try:
from elbepack import virtapt
from apt_pkg import TagFile
virtapt_imported = True
except ImportError:
print "WARNING - python-apt not available: if there are multiple versions of"
print " elbe-bootstrap packages on the mirror(s) elbe selects the first package it"
print " has found. There is no guarantee that the latest package is used."
print " To ensure this, the python-apt package needs to be installed."
import urllib2
virtapt_imported = False
class NoKinitrdException(Exception):
pass
def get_sources_list( prj, defs ):
suite = prj.text("suite")
slist = ""
if prj.has("mirror/primary_host"):
mirror = "%s://%s/%s" % ( prj.text("mirror/primary_proto"), prj.text("mirror/primary_host"), prj.text("mirror/primary_path") )
slist += "deb %s %s main\n" % (mirror, suite)
slist += "deb-src %s %s main\n" % (mirror, suite)
if prj.has("mirror/cdrom"):
tmpdir = mkdtemp()
kinitrd = prj.text("buildimage/kinitrd", default=defs, key="kinitrd")
os.system( '7z x -o%s "%s" pool/main/%s/%s dists' % (tmpdir, prj.text("mirror/cdrom"), kinitrd[0], kinitrd) )
slist += "deb file://%s %s main\n" % (tmpdir,suite)
if prj.node("mirror/url-list"):
for n in prj.node("mirror/url-list"):
if n.has("binary"):
tmp = n.text("binary").replace("LOCALMACHINE", "localhost")
slist += "deb %s\n" % tmp.strip()
if n.has("source"):
tmp = n.text("source").replace("LOCALMACHINE", "localhost")
slist += "deb-src %s\n" % tmp.strip()
return slist
def get_key_list (prj):
retval = []
if prj.node("mirror/url-list"):
for n in prj.node("mirror/url-list"):
if n.has("key"):
tmp = n.text("key").replace("LOCALMACHINE", "localhost")
retval.append (tmp.strip ())
return retval
def get_initrd_pkg( prj, defs ):
initrdname = prj.text("buildimage/kinitrd", default=defs, key="kinitrd")
return initrdname
def get_url ( arch, suite, target_pkg, mirror ):
try:
packages = urllib2.urlopen("%s/dists/%s/main/binary-%s/Packages" %
(mirror.replace("LOCALMACHINE", "localhost"), suite, arch))
packages = packages.readlines()
packages = filter( lambda x: x.startswith( "Filename" ), packages )
packages = filter( lambda x: x.find( target_pkg ) != -1, packages )
tmp = packages.pop()
urla = tmp.split()
url = "%s/%s" % (mirror.replace("LOCALMACHINE", "localhost"), urla[1])
except IOError:
url = ""
except IndexError:
url = ""
return url
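# Added note: get_url() simply greps the flat Packages index for "Filename"
# lines, so a stanza such as (made-up values)
#
#   Package: elbe-bootstrap
#   Filename: pool/main/e/elbe-bootstrap/elbe-bootstrap_1.0_armel.deb
#
# resolves to <mirror>/pool/main/e/elbe-bootstrap/elbe-bootstrap_1.0_armel.deb.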
def get_initrd_uri( prj, defs, arch ):
if arch == "default":
arch = prj.text("buildimage/arch", default=defs, key="arch")
suite = prj.text("suite")
name = prj.text("name", default=defs, key="name")
apt_sources = get_sources_list(prj, defs)
apt_keys = get_key_list (prj)
target_pkg = get_initrd_pkg(prj, defs)
if virtapt_imported:
v = virtapt.VirtApt( name, arch, suite, apt_sources, "", apt_keys )
d = virtapt.apt_pkg.DepCache(v.cache)
pkg = v.cache[target_pkg]
c=d.get_candidate_ver(pkg)
x=v.source.find_index(c.file_list[0][0])
r=virtapt.apt_pkg.PackageRecords(v.cache)
r.lookup(c.file_list[0])
uri = x.archive_uri(r.filename)
if not x.is_trusted:
return "", uri
return r.sha1_hash, uri
else:
url = "%s://%s/%s" % (prj.text("mirror/primary_proto"),
prj.text("mirror/primary_host"),
prj.text("mirror/primary_path") )
pkg = get_url ( arch, suite, target_pkg, url )
if pkg:
return "", pkg
for n in prj.node("mirror/url-list"):
url = n.text("binary")
urla = url.split()
pkg = get_url ( arch, suite, target_pkg,
urla[0].replace("BUILDHOST", "localhost") )
if pkg:
return "", pkg
return "", ""
def get_dsc_size( fname ):
if not virtapt_imported:
return 0
tf = TagFile( fname )
sz = os.path.getsize(fname)
for sect in tf:
if sect.has_key('Files'):
files = sect['Files'].split('\n')
files = [ f.strip().split(' ') for f in files ]
for f in files:
sz += int(f[1])
return sz
def copy_kinitrd( prj, target_dir, defs, arch="default" ):
try:
sha1, uri = get_initrd_uri(prj, defs, arch)
except KeyError:
raise NoKinitrdException ('no elbe-bootstrap package available')
return
except SystemError:
raise NoKinitrdException ('a configured mirror is not reachable')
return
except CommandError as e:
raise NoKinitrdException ("couldn't download elbe-bootstrap package")
return
tmpdir = mkdtemp()
try:
if uri.startswith("file://"):
os.system( 'cp "%s" "%s"' % ( uri[len("file://"):], os.path.join(tmpdir, "pkg.deb") ) )
elif uri.startswith("http://"):
os.system( 'wget -O "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), uri ) )
elif uri.startswith("ftp://"):
os.system( 'wget -O "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), uri ) )
else:
raise NoKinitrdException ('no elbe-bootstrap package available')
except CommandError as e:
raise NoKinitrdException ("couldn't download elbe-bootstrap package")
return
if len(sha1) > 0:
m = hashlib.sha1()
with open (os.path.join(tmpdir, "pkg.deb"), "rb") as f:
buf = f.read(65536)
while len(buf)>0:
m.update( buf )
buf = f.read(65536)
if m.hexdigest() != sha1:
raise NoKinitrdException ('elbe-bootstrap failed to verify !!!')
else:
print "-----------------------------------------------------"
print "WARNING:"
print "Using untrusted elbe-bootstrap"
print "-----------------------------------------------------"
os.system( 'dpkg -x "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), tmpdir ) )
if prj.has("mirror/cdrom"):
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'initrd-cdrom.gz' ), os.path.join(target_dir, "initrd.gz") ) )
else:
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'initrd.gz' ), os.path.join(target_dir, "initrd.gz") ) )
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'vmlinuz' ), os.path.join(target_dir, "vmlinuz") ) )
os.system( 'rm -r "%s"' % tmpdir )
| gpl-3.0 | 6,152,932,937,203,005,000 | 32.650442 | 154 | 0.575016 | false |
popgengui/negui | agestrucne/pgchromlocifilemanager.py | 1 | 9254 | '''
Description
This class wraps defs to validate a chromosome
loci table file used by LDNe2 to filter out loci
pairs that share a chromosome.
'''
__filename__ = "pgchromlocifilemanager.py"
__date__ = "20180502"
__author__ = "Ted Cosart<[email protected]>"
'''
This string designates that
there is no chrom loci file,
in the case expected by LDNe2:
'''
NO_CHROM_LOCI_FILE="None"
CHROM_TOTAL_ZERO=0
CHROM_LOCI_FILE_DELIMITER="\t"
#Field order in the file
IDX_CHROM_NAME=0
IDX_LOCI_NAME=1
LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM=0
LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM=1
LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM=2
LOCI_PAIRING_SCHEME_DESCRIPT={ LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM:"use all pairs",
LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM:"loci pair p1,p2 must be from the same chromosome",
LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM:"loci pair p1,p2, must be from different chromosomes" }
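'''
Added illustration (file contents are made up): the tab-delimited chrom/loci
table is expected to look like

	chr1	loci_0001
	chr1	loci_0002
	chr2	loci_0003

with field IDX_CHROM_NAME (0) holding the chromosome name and IDX_LOCI_NAME (1)
the loci name; each loci must be assigned to exactly one chromosome.
'''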
import os
class GenepopLociScraper( object ):
'''
This is a convenience class to
segregate the code needed just
to get the limited loci info
needed for the ChromLociFileManager.
'''
def __init__( self, s_genepop_file ):
self.__gpfile=s_genepop_file
self.__get_loci_list()
return
#end __init__
def __get_loci_list( self ):
FIRST_LINE=1
POPLINE="pop"
DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE=","
ls_loci_list=[]
o_file=open( self.__gpfile, 'r' )
i_line_number = 0
s_second_line_entry=None
for s_line in o_file:
i_line_number += 1
if i_line_number==FIRST_LINE:
continue
elif i_line_number == 2:
s_second_line_entry=s_line.strip()
#If second line is not only loci line,
#we continue to build our loci list,
#line by line:
ls_loci_list.append( s_line.strip() )
elif s_line.strip().lower() == POPLINE:
if i_line_number == 3:
#all loci were on line 2,
#and entered as a list, so se
#reassign our loci_list thusly:
ls_loci_list=s_second_line_entry.split( \
DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE )
#end if first pop line is file's 3rd line, then loci format is list
break
else:
ls_loci_list.append( s_line.strip() )
#end if first line, else second line, else pop line, else loci line
#end for each linn in file
o_file.close()
self.__loci_list=ls_loci_list
return
#end __get_loci_list
@property
def loci_list( self ):
return self.__loci_list
#end property loci_list
#end class GenepopLociScraper
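# Added illustration (loci names are made up): the two genepop header layouts
# handled by GenepopLociScraper.__get_loci_list --
#
#   one locus per line:          comma-delimited list on line 2:
#       my title                     my title
#       loci_001                     loci_001,loci_002,loci_003
#       loci_002                     pop
#       pop                          ...
#       ...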
class ChromLociFileManager( object ):
'''
2018_05_02. This class is created, inititally,
to validate files to be used by LDNe2 to get
chromosome/loci pairs, for use in filtering
loci pairs that share a chromsome. We may
want to put it to other uses later.
Note that it also is the single source for
the string that designates that no such
file is to be used, and which chromosome
totals are invalid (see mod-level assignments).
'''
def __init__( self,
s_file_name=NO_CHROM_LOCI_FILE,
ls_genepop_files_that_use_the_file=[],
i_ldne_pairing_scheme=None ):
self.__filename=s_file_name
'''
Note -- no list.copy() def for python2:
'''
self.__genepop_files=[ v_item for v_item
in ls_genepop_files_that_use_the_file ]
self.__total_chromosomes=None
self.__chromloci_table=None
self.__unlisted_loci=[]
self.__loci_pairing_scheme=i_ldne_pairing_scheme
return
#end __init__
def __validate_file( self ):
s_error_message=""
b_is_valid=False
b_is_file=os.path.isfile( self.__filename )
if b_is_file:
self.__get_total_chromosomes()
b_each_loci_paired_with_one_chromosome=\
self.__each_loci_is_assigned_to_exactly_one_chromosome()
b_all_loci_listed=self.__all_genepop_loci_are_listed()
'''
2018_05_07. The only loci pairing violation detected so far,
occurs when the client has a chrom/loci file that contains just one
		chromosome, and also requests the loci pairing scheme that requires
pairs l1,l2, from chrom c1,c2, have c1 != c2.
'''
b_pairing_violation=\
self.__loci_pairing_scheme is not None \
and self.__loci_pairing_scheme \
== LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM \
and self.__total_chromosomes == 1
if not b_each_loci_paired_with_one_chromosome:
s_error_message += "\nAt least one loci is paired with " \
+ "more than one chromosome." \
if not b_all_loci_listed:
s_error_message += "\n" \
+ " in chrom/loci file, " \
+ self.__filename + ", " \
+ "Genepop file(s) has (have) the " \
+ "following loci not " \
+ "assigned to chromosomes: \n" \
+ str( self.__unlisted_loci )
#end if some loci unlisted
if b_pairing_violation:
s_error_message += "\n" \
+ " in chrom/loci file, " \
+ self.__filename + ", " \
+ " the chromosome total, " \
+ str( self.__total_chromosomes ) \
+ ", is incompatible with the " \
+ "loci pairing scheme: " \
+ LOCI_PAIRING_SCHEME_DESCRIPT[ \
self.__loci_pairing_scheme ]
#end if loci pairing violation
else:
s_error_message="\nFile, " + self.__filename + "does not exist."
#end if we have a chrom/loci file else not
if s_error_message != "":
raise Exception( "In ChromLociFileManager instance, " \
+ "def __validate_file, " \
+ "file found to be invalid with message: " \
+ s_error_message )
#end if we noted an error, raise exception
return
#end __validate_file
def __get_chrom_loci_table( self ):
MIN_NUM_FIELDS=2
o_file=open( self.__filename, 'r' )
self.__chromloci_table={}
for s_line in o_file:
ls_fields=s_line.strip().split( CHROM_LOCI_FILE_DELIMITER )
s_chrom=ls_fields[ IDX_CHROM_NAME ]
if len( ls_fields ) < MIN_NUM_FIELDS:
raise Exception( "In ChromLociFileManager, " \
+ "def __get_chrom_loci_table, " \
+ "a file line has fewer than the " \
+ "required " + str( MIN_NUM_FIELDS ) \
+ " fields for a chrom/loci table file. " \
+ "The file line is: \"" + s_line.strip() + "\"" )
#end if too few fields
s_loci_name=ls_fields[ IDX_LOCI_NAME ]
if s_chrom in self.__chromloci_table:
self.__chromloci_table[ s_chrom ].append( s_loci_name )
else:
self.__chromloci_table[ s_chrom ]=[ s_loci_name ]
#end if chrom already in dict, else add
#end for each line in file
o_file.close()
return
#end __get_chrom_loci_table
def __all_genepop_loci_are_listed( self ):
b_all_listed=False
set_loci_listed_in_chrom_loci_file=self.__get_set_loci_list_from_chrom_loci_file()
i_total_unlisted_loci=0
for s_genepop_file in self.__genepop_files:
ls_loci_in_this_gp_file=\
self.__get_loci_list_from_genepop_file( s_genepop_file )
set_loci_in_this_gp_file=set( ls_loci_in_this_gp_file )
if not( set_loci_in_this_gp_file.issubset( set_loci_listed_in_chrom_loci_file ) ):
set_diff=set_loci_in_this_gp_file.difference( set_loci_listed_in_chrom_loci_file )
i_total_unlisted_loci += len( set_diff )
self.__unlisted_loci += list( set_diff )
#end if gp list not a subset of our table's loci
#end for each genepop file
b_all_listed=( i_total_unlisted_loci==0 )
return b_all_listed
#end __all_genepop_loci_are_listed
def __each_loci_is_assigned_to_exactly_one_chromosome( self ):
b_loci_assignments_valid=True
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if not table, make one
ds_chrom_names_by_loci_name={}
for s_chrom in self.__chromloci_table:
ls_loci=self.__chromloci_table[ s_chrom ]
for s_loci in ls_loci:
if s_loci in ds_chrom_names_by_loci_name:
b_loci_assignments_valid=False
break
else:
ds_chrom_names_by_loci_name[ s_loci ]=s_chrom
#end if loci already paired with a chrom
#end for each loci in this chrom's loci list
#end for each chrom
return b_loci_assignments_valid
#end def __each_loci_is_assigned_to_exactly_one_chromosome
def validateFile( self ):
self.__validate_file()
return
#end validateFile
def __get_loci_list_from_genepop_file( self, s_genepop_file ):
o_gp_loci_scraper=GenepopLociScraper( s_genepop_file )
return o_gp_loci_scraper.loci_list
#end __get_loci_list_from_chrom_loci_file
def __get_set_loci_list_from_chrom_loci_file( self ):
ls_loci_list=[]
set_loci_list=None
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if no table, get it
for s_chrom in self.__chromloci_table:
ls_loci_list +=self.__chromloci_table[ s_chrom ]
#end for each chrom, append loci list
set_loci_list=set( ls_loci_list )
return set_loci_list
#end def __get_loci_list_from_chrom_loci_file
def __get_total_chromosomes( self ):
if self.__total_chromosomes is None:
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if no table
self.__total_chromosomes=len( self.__chromloci_table )
#end if total not yet calc'd
return
#end __get_total_chromosomes
#end class ChromLociFileManager
if __name__ == "__main__":
s_test_file="/home/ted/temp/tclf.tsv"
s_gp="/home/ted/temp/gp.gp"
o_clfm=ChromLociFileManager( s_test_file, [ s_gp ] )
o_clfm.validateFile()
pass
#end if main
| agpl-3.0 | -9,070,952,403,891,394,000 | 25.515759 | 96 | 0.654636 | false |
IQSS/miniverse | dv_apps/slackbot/starterbot.py | 1 | 2042 | import os
import time
from slackclient import SlackClient
# starterbot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "gofish"
# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
"""
        Receives commands directed at the bot and determines whether they
        are valid. If so, acts on them; if not, replies with a hint about
        what is needed for clarification.
"""
response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
"* command with numbers, delimited by spaces."
if command.startswith(EXAMPLE_COMMAND):
response = "Sure...write some more code then I can do that!"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
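# Added note: the RTM firehose yields a list of event dicts. A triggering
# message looks roughly like the made-up event below, for which this function
# returns ("gofish 1 2", "C0XXXXXXX"):
#
#   [{"type": "message", "channel": "C0XXXXXXX", "user": "U1YYYYYYY",
#     "text": "<@" + BOT_ID + "> gofish 1 2", "ts": "1500000000.000001"}]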
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| mit | -338,846,921,837,846,000 | 36.814815 | 75 | 0.633203 | false |
bhell/jimi | jimi/jimi/settings_orig.py | 1 | 5206 | # Django settings for jimi project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '54i777i7@8ijabmkbqy9xu%67*%srlw0*p7!jb#0(+6%d-&uxu'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'jimi.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'jimi.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | -6,768,545,711,191,450,000 | 33.476821 | 101 | 0.680369 | false |
mattduan/proof | driver/test/testMySQLConnection.py | 1 | 1469 | """
A testcase for MySQLConnection.
"""
import proof.driver.MySQLConnection as MySQLConnection
import proof.driver.MySQLCursor as MySQLCursor
import proof.driver.MySQLDictCursor as MySQLDictCursor
import unittest
class testMySQLConnection(unittest.TestCase):
def setUp(self):
self.__host = 'localhost'
self.__username = 'test'
self.__password = 'test'
self.__db = 'test'
self.con = MySQLConnection.MySQLConnection( host = self.__host,
user = self.__username,
passwd = self.__password,
db = self.__db )
def tearDown(self):
self.con.close()
self.con = None
def testCommit(self):
self.con.commit()
self.assert_(True)
def testRollback(self):
self.con.rollback()
self.assert_(True)
def testAutoCommit(self):
auto_commit = self.con.getAutoCommit()
self.assert_( auto_commit )
self.con.setAutoCommit(False)
auto_commit = self.con.getAutoCommit()
self.assert_( not auto_commit )
def testCursor(self):
cursor = self.con.getCursor()
self.assert_(cursor.__class__.__name__ == 'MySQLCursor')
dictcursor = self.con.getCursor(ret_dict=1)
self.assert_(dictcursor.__class__.__name__ == 'MySQLDictCursor')
| bsd-3-clause | 1,739,310,116,739,996,700 | 27.803922 | 77 | 0.554118 | false |
globocom/database-as-a-service | dbaas/physical/migrations/0112_auto__add_ip.py | 1 | 36235 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Ip'
db.create_table(u'physical_ip', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('identifier', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200)),
('instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['physical.Instance'], null=True, on_delete=models.SET_NULL, blank=True)),
))
db.send_create_signal(u'physical', ['Ip'])
def backwards(self, orm):
# Deleting model 'Ip'
db.delete_table(u'physical_ip')
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'infra'", 'null': 'True', 'to': u"orm['physical.Pool']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ssl_mode': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provisioner': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.ip': {
'Meta': {'object_name': 'Ip'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'persistense_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_persisted_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.pool': {
'Meta': {'object_name': 'Pool'},
'cluster_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dbaas_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'pools'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'project_id': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'rancher_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rancher_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'storageclass': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'pools'", 'symmetrical': 'False', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vpc': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'configure_log': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical'] | bsd-3-clause | 7,484,833,559,263,377,000 | 92.391753 | 239 | 0.560977 | false |
vlegoff/tsunami | src/primaires/scripting/extensions/selection.py | 1 | 8145 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe Selection, détaillée plus bas."""
from textwrap import dedent
from primaires.interpreteur.editeur.selection import Selection as EdtSelection
from primaires.interpreteur.editeur.selection_objet import SelectionObjet
from primaires.scripting.editeurs.edt_script import EdtScript
from primaires.scripting.extensions.base import Extension
from primaires.scripting.script import Script
class Selection(Extension):
"""Classe représentant le type éditable 'selection'.
Ce type utilise l'éditeur SelectionObjet. Il permet de
sélectionner aucune, un ou plusieurs valeurs.
"""
extension = "selection"
aide = "une liste de zéro, une ou plusieurs possibilités"
nom_scripting = "la sélection d'éditeur"
def __init__(self, structure, nom):
Extension.__init__(self, structure, nom)
self.selection = []
self.script = ScriptSelection(self)
@property
def editeur(self):
"""Retourne le type d'éditeur."""
return SelectionObjet
@property
def arguments(self):
"""Retourne les arguments de l'éditeur."""
evt = self.script["selection"]
if evt.nb_lignes:
evt.executer()
cles = evt.espaces.variables["retour"]
evt = self.script["valeurs"]
if evt.nb_lignes:
evt.executer()
valeurs = evt.espaces.variables["retour"]
else:
valeurs = list(cles)
else:
cles = valeurs = self.selection
selection = dict(zip(cles, valeurs))
return (selection, )
def etendre_editeur(self, presentation):
"""Ëtend l'éditeur en fonction du type de l'extension."""
# Selection
selection = presentation.ajouter_choix("valeurs", "v", EdtSelection,
self, "selection")
selection.parent = presentation
selection.apercu = "{valeur}"
selection.aide_courte = dedent("""
            Entrez |ent|une valeur|ff| pour l'ajouter ou la retirer.
Entrez |cmd|/|ff| pour revenir à la fenêtre parente.
Cet éditeur existe pour laisser le joueur choisir entre
            zéro, une ou plusieurs valeurs parmi une liste. On parle
de sélection, car le joueur sélectionne certaines
informations. La liste de valeurs peut être très
simple : par exemple, on demande au joueur de choisir les noms
de villes qu'il fréquente régulièrement : le joueur
peut en choisir aucune, une ou plusieurs. La case de la
structure contiendra la liste des valeurs sélectionnées par
le joueur. Dans ce cas, vous pouvez entrer directement les
valeurs possibles pour les ajouter dans la liste des choix
proposés par l'éditeur.
Parfois cependant, on a besoin d'offrir un choix plus complexe.
            Par exemple, entrer un ou plusieurs noms de joueurs (la liste
des joueurs étant dynamiquement générée, pas statique).
Dans ce cas, on peut utiliser les deux évènements définis
dans le script de cet éditeur : l'évènement 'selection'
doit retourner une liste des choix possibles. Par exemple,
dans ce cas, une liste des noms de joueurs. L'évènement
'valeurs' permet de faire correspondre chaque choix
de la liste avec une valeur de remplacement : dans le
cas qui nous occupe, le joueur rentre le nom du ou des
            joueurs, mais le système fait la correspondance avec
            les joueurs (les personnages sont écrits dans la structure, pas la
chaîne de caractères contenant leur nom). Ces scripts sont
donc bien plus puissants qu'une liste statique, mais peuvent
s'avérer complexes à utiliser.
La liste statique définie ici n'est utilisée que si
l'évènement 'selection' est vide.
Si l'évènement 'selection' existe mais que l'évènement
'valeurs' est vide, les chaînes de caractères sont ajoutées
dans la liste (il n'y a pas de remplacement d'effectué).
Valeurs autorisées : {valeur}""".strip("\n"))
# Script
scripts = presentation.ajouter_choix("scripts", "sc", EdtScript,
self.script)
scripts.parent = presentation
class ScriptSelection(Script):
"""Définition des sélection scriptables."""
def init(self):
"""Initialisation du script."""
        # 'selection' event
evt_selection = self.creer_evenement("selection")
evt_selection.aide_courte = "la liste des choix scriptables"
evt_selection.aide_longue = \
"Cet évènement est appelé pour déterminer les choix possibles " \
"que le joueur dans l'éditeur pourra sélectionner. Une " \
"variable |ent|retour|ff| doit être créée dans cet évènement, " \
"contenant une liste de chaînes. Le joueur dans l'éditeur " \
"pourra choisir aucune, une ou plusieurs des valeurs se " \
"trouvant dans cette liste. L'évènement 'valeurs' permet de " \
"configurer de façon encore plus précise ce qui sera conservé " \
"dans la structure."
        # 'valeurs' event
evt_valeurs = self.creer_evenement("valeurs")
evt_valeurs.aide_courte = "la liste des valeurs correspondantes"
evt_valeurs.aide_longue = \
"Cet évènement est couplé à l'évènement 'selection' pour " \
"déterminer les choix possibles et leur valeur respective. " \
"Quand le joueur dans l'éditeur entrera l'un des choix " \
"(une des chaînes contenues dans la liste de la variable " \
"|ent|retour|ff| de l'évènement 'selection'), le système " \
"recherchera la même case de la liste contenue dans la " \
"variable |ent|retour|ff| de l'évènement 'valeurs'. Ainsi, " \
"cet évènement doit contenir dans le même ordre que ''selection' " \
"les valeurs correspondantes. Si 'selection' contient une liste " \
"de noms de joueurs, l'évènement 'valeurs' doit contenir " \
"la liste des joueurs correspondants dans le même ordre. " \
"Quand le joueur dans l'éditeur entrera un nom de joueur, " \
"la structure sera modifiée pour contenir le joueur (et " \
"non pas son nom)."
| bsd-3-clause | 178,633,146,605,590,900 | 45.732558 | 80 | 0.663971 | false |
johnkeates/statsite | sinks/graphite.py | 1 | 5895 | """
Supports flushing metrics to graphite
"""
import re
import sys
import socket
import logging
import pickle
import struct
# Initialize the logger
logging.basicConfig()
SPACES = re.compile(r"\s+")
SLASHES = re.compile(r"\/+")
NON_ALNUM = re.compile(r"[^a-zA-Z_\-0-9\.]")
class GraphiteStore(object):
def __init__(self, host="localhost", port=2003, prefix="statsite.", attempts=3,
protocol='lines', normalize=None):
"""
Implements an interface that allows metrics to be persisted to Graphite.
Raises a :class:`ValueError` on bad arguments.
:Parameters:
- `host` : The hostname of the graphite server.
- `port` : The port of the graphite server
- `prefix` (optional) : A prefix to add to the keys. Defaults to 'statsite.'
- `attempts` (optional) : The number of re-connect retries before failing.
- `normalize` (optional) : If set, attempt to sanitize/normalize keys to be more
generally compliant with graphite/carbon expectations.
"""
# Convert the port to an int since its coming from a configuration file
port = int(port)
attempts = int(attempts)
if port <= 0:
raise ValueError("Port must be positive!")
if attempts < 1:
raise ValueError("Must have at least 1 attempt!")
if protocol not in ["pickle", "lines"]:
raise ValueError("Supported protocols are pickle, lines")
if normalize is not None and normalize not in ("False", "false", "No", "no"):
self.normalize_func = self.normalize_key
else:
self.normalize_func = lambda k: "%s%s" % (self.prefix, k)
self.logger = logging.getLogger("statsite.graphitestore")
self.host = host
self.port = port
self.prefix = prefix
self.attempts = attempts
self.sock = self._create_socket()
self.flush = self.flush_pickle if protocol == "pickle" else self.flush_lines
self.metrics = []
def normalize_key(self, key):
"""
Take a single key string and return the same string with spaces, slashes and
non-alphanumeric characters subbed out and prefixed by self.prefix.
"""
key = SPACES.sub("_", key)
key = SLASHES.sub("-", key)
key = NON_ALNUM.sub("", key)
key = "%s%s" % (self.prefix, key)
return key
def append(self, metric):
"""
        Add one metric to the queue for sending. Additionally, modify the key to be
        compatible with the txstatsd format.
:Parameters:
- `metric` : A single statsd metric string in the format "key|value|timestamp".
"""
if metric and metric.count("|") == 2:
k, v, ts = metric.split("|")
k = self.normalize_func(k)
self.metrics.append(((k), v, ts))
def flush_lines(self):
"""
Flushes the metrics provided to Graphite.
"""
if not self.metrics:
return
lines = ["%s %s %s" % metric for metric in self.metrics]
data = "\n".join(lines) + "\n"
# Serialize writes to the socket
try:
self._write_metric(data)
except StandardError:
self.logger.exception("Failed to write out the metrics!")
def flush_pickle(self):
"""
Flushes the metrics provided to Graphite.
"""
if not self.metrics:
return
# transform a list of strings into the list of tuples that
# pickle graphite interface supports, in the form of
# (key, (timestamp, value))
# http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-pickle-protocol
metrics_fmt = []
for (k, v, ts) in self.metrics:
metrics_fmt.append((k, (ts, v)))
# do pickle the list of tuples
# add the header the pickle protocol wants
payload = pickle.dumps(metrics_fmt, protocol=2)
header = struct.pack("!L", len(payload))
message = header + payload
try:
self._write_metric(message)
except StandardError:
self.logger.exception("Failed to write out the metrics!")
def close(self):
"""
Closes the connection. The socket will be recreated on the next
flush.
"""
try:
if self.sock:
self.sock.close()
except StandardError:
self.logger.warning("Failed to close connection!")
def _create_socket(self):
"""Creates a socket and connects to the graphite server"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except StandardError:
self.logger.error("Failed to connect!")
sock = None
return sock
def _write_metric(self, metric):
"""Tries to write a string to the socket, reconnecting on any errors"""
for _ in xrange(self.attempts):
if self.sock:
try:
self.sock.sendall(metric)
return
except socket.error:
self.logger.exception("Error while flushing to graphite. Reattempting...")
self.sock = self._create_socket()
self.logger.critical("Failed to flush to Graphite! Gave up after %d attempts.",
self.attempts)
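# Illustrative usage sketch (not part of the original sink). It shows how the
# GraphiteStore above is typically driven: metrics arrive as
# "key|value|timestamp" strings, the same format main() reads from stdin. It
# assumes a reachable carbon daemon on localhost:2003, uses hypothetical metric
# names, and is never called by this module.
def _example_usage():
    store = GraphiteStore(host="localhost", port=2003, prefix="statsite.")
    for line in ("counts.requests|42|1400000000",
                 "timers.api.p95|0.250|1400000000"):
        store.append(line)  # keys are prefixed/normalized here
    store.flush()           # lines or pickle protocol, per the `protocol` option
    store.close()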
def main():
    # Initialize from our arguments
graphite = GraphiteStore(*sys.argv[1:])
# Get all the inputs
while True:
try:
graphite.append(raw_input().strip())
except EOFError:
break
# Flush
graphite.logger.info("Outputting %d metrics", len(graphite.metrics))
graphite.flush()
graphite.close()
if __name__ == "__main__":
main()
| bsd-3-clause | -8,307,928,939,181,115,000 | 31.39011 | 98 | 0.575403 | false |
itielshwartz/BackendApi | lib/googleapiclient/schema.py | 1 | 10198 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
from __future__ import absolute_import
import six
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = '[email protected] (Joe Gregorio)'
import copy
from oauth2client import util
class Schemas(object):
"""Schemas for an API."""
def __init__(self, discovery):
"""Constructor.
Args:
discovery: object, Deserialized discovery document from which we pull
out the named schema.
"""
self.schemas = discovery.get('schemas', {})
# Cache of pretty printed schemas.
self.pretty = {}
@util.positional(2)
def _prettyPrintByName(self, name, seen=None, dent=0):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
if name in seen:
# Do not fall into an infinite loop over recursive definitions.
return '# Object with schema name: %s' % name
seen.append(name)
if name not in self.pretty:
self.pretty[name] = _SchemaToStruct(self.schemas[name],
seen, dent=dent).to_str(self._prettyPrintByName)
seen.pop()
return self.pretty[name]
def prettyPrintByName(self, name):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
@util.positional(2)
def _prettyPrintSchema(self, schema, seen=None, dent=0):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
def prettyPrintSchema(self, schema):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintSchema(schema, dent=1)[:-2]
def get(self, name):
"""Get deserialized JSON schema from the schema name.
Args:
name: string, Schema name.
"""
return self.schemas[name]
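# Illustrative sketch (not part of the original module): typical use of the
# Schemas class once a discovery document has been deserialized. The
# `discovery_doc` argument is a placeholder for the dict loaded from a
# discovery URI, and 'Foo' stands for any schema name it defines.
def _example_usage(discovery_doc):
    schemas = Schemas(discovery_doc)
    prototype = schemas.prettyPrintByName('Foo')  # commented Python prototype
    raw = schemas.get('Foo')                      # raw deserialized JSON schema
    return prototype, raw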
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
@util.positional(3)
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, '\n'])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = '\n' + ' ' * (self.dent + 2) + '# '
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, ' # ', comment, '\n'])
else:
self.value.extend([text, '\n'])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get('type')
if stype == 'object':
self.emitEnd('{', schema.get('description', ''))
self.indent()
if 'properties' in schema:
for pname, pschema in six.iteritems(schema.get('properties', {})):
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
elif 'additionalProperties' in schema:
self.emitBegin('"a_key": ')
self._to_str_impl(schema['additionalProperties'])
self.undent()
self.emit('},')
elif '$ref' in schema:
schemaName = schema['$ref']
description = schema.get('description', '')
s = self.from_cache(schemaName, seen=self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == 'boolean':
value = schema.get('default', 'True or False')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'string':
value = schema.get('default', 'A String')
self.emitEnd('"%s",' % str(value), schema.get('description', ''))
elif stype == 'integer':
value = schema.get('default', '42')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'number':
value = schema.get('default', '3.14')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'null':
self.emitEnd('None,', schema.get('description', ''))
elif stype == 'any':
self.emitEnd('"",', schema.get('description', ''))
elif stype == 'array':
self.emitEnd('[', schema.get('description'))
self.indent()
self.emitBegin('')
self._to_str_impl(schema['items'])
self.undent()
self.emit('],')
else:
self.emit('Unknown type! %s' % stype)
self.emitEnd('', '')
self.string = ''.join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
| apache-2.0 | -364,215,873,647,700,700 | 31.58147 | 96 | 0.581683 | false |
macourteau/scripts | chromium/sublime/find_owners.py | 1 | 2139 | """Sublime Text plugin to find the Chromium OWNERS for the current file.
In a Chromium checkout, this will search for the closest OWNERS file and list
its contents. Select an entry to copy to the clipboard. You can also open the
displayed OWNERS file, or walk up the directory tree to the next OWNERS file.
"""
import os
import sublime
import sublime_plugin
class FindOwnersCommand(sublime_plugin.WindowCommand):
"""Implements the Find Owners command."""
def run(self):
self.find_owners(self.window.active_view().file_name())
def find_owners(self, start_path):
current_directory = start_path
while True:
new_directory = os.path.dirname(current_directory)
if new_directory == current_directory:
        sublime.error_message('No OWNERS file found for "%s".' % start_path)
return
current_directory = new_directory
current_owners_file_path = os.path.join(current_directory, 'OWNERS')
if os.path.exists(current_owners_file_path):
self.last_directory = current_directory
self.owners_file_path = current_owners_file_path
with open(self.owners_file_path, 'r') as owners_file:
sublime.status_message('Found OWNERS file: "%s".' %
self.owners_file_path)
data = owners_file.read()
self.lines = data.strip().split('\n')
self.lines.insert(0, '[Show parent OWNERS file]')
self.lines.insert(1, '[Open this OWNERS file]')
self.lines.insert(2, '----- (select owner below to copy) -----')
self.window.show_quick_panel(self.lines,
self.on_select,
sublime.MONOSPACE_FONT)
return
def on_select(self, index):
# Show parent OWNERS file.
if index == 0:
self.find_owners(self.last_directory)
# Open this OWNERS file.
elif index == 1:
self.window.open_file(self.owners_file_path)
# Copy this line to clipboard.
elif index > 2:
sublime.set_clipboard(self.lines[index])
sublime.status_message('Copied "%s" to clipboard.' % self.lines[index])
| mit | -6,741,106,143,749,502,000 | 38.611111 | 77 | 0.636746 | false |
toidi/hadoop-yarn-api-python-client | tests/test_history_server.py | 1 | 3978 | # -*- coding: utf-8 -*-
from mock import patch
from tests import TestCase
from yarn_api_client.history_server import HistoryServer
from yarn_api_client.errors import IllegalArgumentError
@patch('yarn_api_client.history_server.HistoryServer.request')
class HistoryServerTestCase(TestCase):
def setUp(self):
self.hs = HistoryServer('localhost')
@patch('yarn_api_client.history_server.get_jobhistory_endpoint')
def test__init__(self, get_config_mock, request_mock):
get_config_mock.return_value = None
HistoryServer()
get_config_mock.assert_called_with()
def test_application_information(self, request_mock):
self.hs.application_information()
request_mock.assert_called_with('/ws/v1/history/info')
def test_jobs(self, request_mock):
self.hs.jobs()
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs', params={})
self.hs.jobs(state='NEW', user='root', queue='high', limit=100,
started_time_begin=1, started_time_end=2,
finished_time_begin=3, finished_time_end=4)
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs',
params={"queue": 'high',
"state": 'NEW',
"user": 'root',
"limit": 100,
"startedTimeBegin": 1,
"startedTimeEnd": 2,
"finishedTimeBegin": 3,
"finishedTimeEnd": 4})
with self.assertRaises(IllegalArgumentError):
self.hs.jobs(state='ololo')
def test_job(self, request_mock):
self.hs.job('job_100500')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_100500')
def test_job_attempts(self, request_mock):
self.hs.job_attempts('job_1')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_1/jobattempts')
def test_job_counters(self, request_mock):
self.hs.job_counters('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/counters')
def test_job_conf(self, request_mock):
self.hs.job_conf('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/conf')
def test_job_tasks(self, request_mock):
self.hs.job_tasks('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks', params={})
self.hs.job_tasks('job_2', job_type='m')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks', params={"type": 'm'})
with self.assertRaises(IllegalArgumentError):
self.hs.job_tasks('job_2', job_type='ololo')
def test_job_task(self, request_mock):
self.hs.job_task('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3')
def test_task_counters(self, request_mock):
self.hs.task_counters('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/counters')
def test_task_attempts(self, request_mock):
self.hs.task_attempts('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts')
def test_task_attempt(self, request_mock):
self.hs.task_attempt('job_2', 'task_3', 'attempt_4')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts/attempt_4')
def test_task_attempt_counters(self, request_mock):
self.hs.task_attempt_counters('job_2', 'task_3', 'attempt_4')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts/attempt_4/counters')
| bsd-3-clause | 2,893,879,876,168,801,000 | 44.204545 | 119 | 0.602815 | false |
kannon92/psi4 | psi4/share/psi4/scripts/vmd_cube.py | 1 | 12482 | #!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Francesco Evangelista
# Emory University
from __future__ import print_function
import argparse
import sys
import re
import subprocess
import os
import datetime
from os import listdir, environ
from os.path import isfile, join
vmd_cube_help = """vmd_cube is a script to render cube files with vmd.
To generate cube files with Psi4, add the command cubeprop(wfn) at the end
of your input file, where *wfn* is a Wavefunction object that may be
retrieved from any calculation and used following the pattern "ene, wave =
energy('pbe', return_wfn=True)\\n cubeprop(wave)"."""
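# For reference, a complete Psi4 input following that pattern might look like the
# sketch below (illustrative only; the molecule, method, and basis are arbitrary
# choices and not part of this script):
#
#     molecule {
#     0 1
#     O   0.000000   0.000000   0.000000
#     H   0.000000   0.000000   0.950000
#     H   0.896000   0.000000  -0.317000
#     }
#     set basis cc-pvdz
#     ene, wave = energy('pbe', return_wfn=True)
#     cubeprop(wave)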
vmd_exe = ""
vmd_script_name = "vmd_mo_script.vmd"
vmd_template = """#
# VMD script to plot MOs from cube files
#
# Load the molecule and change the atom style
mol load cube PARAM_CUBEFILE.cube
mol modcolor 0 PARAM_CUBENUM Element
mol modstyle 0 PARAM_CUBENUM CPK 0.400000 0.40000 30.000000 16.000000
# Define the material
material change ambient Opaque 0.310000
material change diffuse Opaque 0.720000
material change specular Opaque 0.500000
material change shininess Opaque 0.480000
material change opacity Opaque 1.000000
material change outline Opaque 0.000000
material change outlinewidth Opaque 0.000000
material change transmode Opaque 0.000000
material change specular Opaque 0.750000
material change ambient EdgyShiny 0.310000
material change diffuse EdgyShiny 0.720000
material change shininess EdgyShiny 1.0000
material change opacity EdgyShiny PARAM_OPACITY
# Customize atom colors
color Element C silver
color Element H white
# Rotate and translate the molecule
rotate x by PARAM_RX
rotate y by PARAM_RY
rotate z by PARAM_RZ
translate by PARAM_TX PARAM_TY PARAM_TZ
scale by PARAM_SCALE
# Eliminate the axis and perfect the view
axes location Off
display projection Orthographic
display depthcue off
color Display Background white"""
vmd_template_surface = """#
# Add the surfaces
mol color ColorID PARAM_SURF1ID
mol representation Isosurface PARAM_ISOVALUE1 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
mol color ColorID PARAM_SURF2ID
mol representation Isosurface PARAM_ISOVALUE2 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
# Render
render TachyonInternal PARAM_CUBEFILE.tga
mol delete PARAM_CUBENUM
"""
vmd_template_rotate = """
light 1 off
light 0 rot y 30.0
light 0 rot x -30.0
"""
default_path = os.getcwd()
# Default parameters
options = {"SURF1ID" : [None,"Surface1 Color Id"],
"SURF2ID" : [None,"Surface2 Color Id"],
"ISOVALUE1" : [None,"Isosurface1 Value"],
"ISOVALUE2" : [None,"Isosurface2 Value"],
"RX" : [None,"X-axis Rotation"],
"RY" : [None,"Y-axis Rotation"],
"RZ" : [None,"Z-axis Rotation"],
"TX" : [None,"X-axis Translation"],
"TY" : [None,"Y-axis Translation"],
"TZ" : [None,"Z-axis Translation"],
"OPACITY" : [None,"Opacity"],
"CUBEDIR" : [None,"Cubefile Directory"],
"SCALE" : [None,"Scaling Factor"],
"MONTAGE" : [None,"Montage"],
"FONTSIZE" : [None,"Font size"],
"IMAGESIZE" : [None,"Image size"],
"VMDPATH" : [None,"VMD Path"]}
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def multigsub(subs,str):
for k,v in subs.items():
str = re.sub(k,v,str)
return str
def find_vmd(options):
    # Use .get() so a missing VMDPATH prints the helpful message below
    # instead of raising a KeyError.
    vmdpath = environ.get('VMDPATH')
    if vmdpath:
        vmdpath = multigsub({" ": r"\ "}, vmdpath)
        options["VMDPATH"][0] = vmdpath
    else:
        print("Please set the VMDPATH environment variable to the path of VMD.")
        exit(1)
def save_setup_command(argv):
file_name = join(default_path, 'vmd_cube_command')
f = open(file_name, 'w')
f.write('# setup command was executed '+datetime.datetime.now().strftime("%d-%B-%Y %H:%M:%S"+"\n"))
f.write(" ".join(argv[:])+"\n")
f.close()
def read_options(options):
parser = argparse.ArgumentParser(description=vmd_cube_help)
parser.add_argument('data', metavar='<cubefile dir>', type=str, nargs='?',default=".",
help='The directory containing the cube files.')
parser.add_argument('--color1', metavar='<integer>', type=int, nargs='?',default=3,
help='the color ID of surface 1 (integer, default = 3)')
parser.add_argument('--color2', metavar='<integer>', type=int, nargs='?',default=23,
help='the color ID of surface 2 (integer, default = 23)')
parser.add_argument('--iso', metavar='<isovalue>', type=float, nargs='?',default=0.05,
help='the isosurface value (float, default = 0.05)')
parser.add_argument('--rx', metavar='<angle>', type=float, nargs='?',default=30.0,
help='the x-axis rotation angle (float, default = 30.0)')
parser.add_argument('--ry', metavar='<angle>', type=float, nargs='?',default=40.0,
help='the y-axis rotation angle (float, default = 40.0)')
parser.add_argument('--rz', metavar='<angle>', type=float, nargs='?',default=15.0,
help='the z-axis rotation angle (float, default = 15.0)')
parser.add_argument('--tx', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the x-axis translation (float, default = 0.0)')
parser.add_argument('--ty', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the y-axis translation (float, default = 0.0)')
parser.add_argument('--tz', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the z-axis translation (float, default = 0.0)')
parser.add_argument('--opacity', metavar='<opacity>', type=float, nargs='?',default=1.0,
help='opacity of the isosurface (float, default = 1.0)')
parser.add_argument('--scale', metavar='<factor>', type=float, nargs='?',default=1.0,
help='the scaling factor (float, default = 1.0)')
parser.add_argument('--montage', const=True, default=False, nargs='?',
help='call montage to combine images. (string, default = false)')
parser.add_argument('--imagesize', metavar='<integer>', type=int, nargs='?',default=250,
help='the size of each image (integer, default = 250)')
parser.add_argument('--fontsize', metavar='<integer>', type=int, nargs='?',default=20,
help='the font size (integer, default = 20)')
args = parser.parse_args()
options["CUBEDIR"][0] = str(args.data)
options["SURF1ID"][0] = str(args.color1)
options["SURF2ID"][0] = str(args.color2)
options["ISOVALUE1"][0] = str(args.iso)
options["ISOVALUE2"][0] = str(-args.iso)
options["RX"][0] = str(args.rx)
options["RY"][0] = str(args.ry)
options["RZ"][0] = str(args.rz)
options["TX"][0] = str(args.tx)
options["TY"][0] = str(args.ty)
options["TZ"][0] = str(args.tz)
options["OPACITY"][0] = str(args.opacity)
options["SCALE"][0] = str(args.scale)
options["MONTAGE"][0] = str(args.montage)
options["FONTSIZE"][0] = str(args.fontsize)
options["IMAGESIZE"][0] = str(args.imagesize)
print("Parameters:")
for k,v in options.items():
print(" %-20s %s" % (v[1],v[0]))
def find_cubes(options):
    # Find all the cube files in the requested directory and return them sorted
    cube_dir = options["CUBEDIR"][0]
    sorted_files = []
    for f in listdir(cube_dir):
        if ".cube" in f:
            sorted_files.append(f)
    return sorted(sorted_files)
def write_and_run_vmd_script(options,cube_files):
vmd_script = open(vmd_script_name,"w+")
vmd_script.write(vmd_template_rotate)
# Define a map that contains all the values of the VMD parameters
replacement_map = {}
for k,v in options.items():
key = "PARAM_" + k.upper()
replacement_map[key] = v[0]
for n,f in enumerate(cube_files):
replacement_map["PARAM_CUBENUM"] = "%03d" % n
replacement_map["PARAM_CUBEFILE"] = options["CUBEDIR"][0] + "/" + f[:-5]
vmd_script_surface = multigsub(replacement_map,vmd_template_surface)
vmd_script_head = multigsub(replacement_map,vmd_template)
vmd_script.write(vmd_script_head + "\n" + vmd_script_surface)
vmd_script.write("quit")
vmd_script.close()
# Call VMD
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -dispdev text -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
def call_montage(options,cube_files):
if options["MONTAGE"][0] == 'True':
# Optionally, combine all figures into one image using montage
montage_exe = which("montage")
if montage_exe:
alpha_mos = []
beta_mos = []
densities = []
basis_functions = []
for f in cube_files:
tga_file = f[:-5] + ".tga"
if "Psi_a" in f:
alpha_mos.append(tga_file)
if "Psi_b" in f:
beta_mos.append(tga_file)
if "D" in f:
densities.append(tga_file)
if "Phi" in f:
basis_functions.append(tga_file)
# Sort the MOs
sorted_mos = []
            for mo_set in [alpha_mos, beta_mos]:
                sorted_set = []
                for s in mo_set:
                    s_split = s.split('_')
                    # Sort by MO number; keep the original file name so beta MOs
                    # are not silently relabeled as alpha ones.
                    sorted_set.append((int(s_split[2]), s))
                sorted_set = sorted(sorted_set)
                sorted_mos.append([s[1] for s in sorted_set])
os.chdir(options["CUBEDIR"][0])
for f in sorted_mos[0]:
f_split = f.split('_')
label = "%s\ \(%s\)" % (f_split[3][:-4],f_split[2])
subprocess.call(("montage -pointsize %s -label %s %s -geometry '%sx%s+0+0>' %s" %
(options["FONTSIZE"][0],label,f,options["IMAGESIZE"][0],options["IMAGESIZE"][0],f)), shell=True)
if len(alpha_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 AlphaMOs.tga" % (montage_exe," ".join(sorted_mos[0]))), shell=True)
if len(beta_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 BetaMOs.tga" % (montage_exe," ".join(sorted_mos[1]))), shell=True)
if len(densities) > 0:
subprocess.call(("%s %s -geometry +2+2 Densities.tga" % (montage_exe," ".join(densities))), shell=True)
if len(basis_functions) > 0:
subprocess.call(("%s %s -geometry +2+2 BasisFunctions.tga" % (montage_exe," ".join(basis_functions))), shell=True)
def main(argv):
read_options(options)
find_vmd(options)
save_setup_command(argv)
cube_files = find_cubes(options)
write_and_run_vmd_script(options,cube_files)
call_montage(options,cube_files)
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 | 6,798,562,547,059,358,000 | 34.971182 | 130 | 0.608476 | false |
gstarnberger/paasta | paasta_tools/check_marathon_services_replication.py | 1 | 17359 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import argparse
import logging
from datetime import datetime
from datetime import timedelta
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import mesos_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.monitoring import replication_utils
from paasta_tools.utils import _log
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoDeploymentsAvailable
log = logging.getLogger(__name__)
def send_event(service, namespace, cluster, soa_dir, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param service: The service name the event is about
:param namespace: The namespace of the service the event is about
:param soa_dir: The service directory to read monitoring information from
:param status: The status to emit for this event
:param output: The output to emit for this event"""
# This function assumes the input is a string like "mumble.main"
monitoring_overrides = marathon_tools.load_marathon_service_config(
service, namespace, cluster).get_monitoring()
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(monitoring_overrides, service, soa_dir=soa_dir)
check_name = 'check_marathon_services_replication.%s' % compose_job_id(service, namespace)
monitoring_tools.send_event(service, check_name, monitoring_overrides, status, output, soa_dir)
_log(
service=service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=cluster,
instance=namespace,
)
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument('-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=marathon_tools.DEFAULT_SOA_DIR,
help="define a different soa config directory")
parser.add_argument('-v', '--verbose', action='store_true',
dest="verbose", default=False)
options = parser.parse_args()
return options
def check_smartstack_replication_for_instance(
service,
instance,
cluster,
soa_dir,
expected_count,
system_paasta_config,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
    :param service: A string like example_service
    :param instance: An instance name (nerve namespace), like "main"
    :param cluster: name of the cluster
    :param soa_dir: The SOA configuration directory to read from
    :param expected_count: The number of instances expected to be running
    :param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
"""
namespace = marathon_tools.read_namespace_for_service_instance(service, instance, soa_dir=soa_dir)
if namespace != instance:
log.debug("Instance %s is announced under namespace: %s. "
"Not checking replication for it" % (instance, namespace))
return
full_name = compose_job_id(service, instance)
job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
crit_threshold = job_config.get_replication_crit_percentage()
monitoring_blacklist = job_config.get_monitoring_blacklist()
log.info('Checking instance %s in smartstack', full_name)
smartstack_replication_info = load_smartstack_info_for_service(
service=service,
namespace=namespace,
soa_dir=soa_dir,
blacklist=monitoring_blacklist,
system_paasta_config=system_paasta_config,
)
log.debug('Got smartstack replication info for %s: %s' % (full_name, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = ('Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n') % full_name
log.error(output)
else:
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
output = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.iteritems()):
num_available_in_location = available_backends.get(full_name, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold)
if under_replicated:
output += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
full_name, num_available_in_location, expected_count_per_location, location, ratio)
else:
output += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
full_name, num_available_in_location, expected_count_per_location, location, ratio)
under_replication_per_location.append(under_replicated)
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': service,
'instance': instance,
'cluster': cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output)
send_event(service=service, namespace=instance, cluster=cluster, soa_dir=soa_dir, status=status, output=output)
def get_healthy_marathon_instances_for_short_app_id(client, app_id):
tasks = client.list_tasks()
tasks_for_app = [task for task in tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(client, service, instance, cluster,
soa_dir, expected_count):
app_id = format_job_id(service, instance)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
num_healthy_tasks = get_healthy_marathon_instances_for_short_app_id(client, app_id)
send_event_if_under_replication(
service=service,
instance=instance,
cluster=cluster,
expected_count=expected_count,
num_available=num_healthy_tasks,
soa_dir=soa_dir,
)
def send_event_if_under_replication(
service,
instance,
cluster,
expected_count,
num_available,
soa_dir,
):
full_name = compose_job_id(service, instance)
job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
crit_threshold = job_config.get_replication_crit_percentage()
output = ('Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)') % (full_name, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': service,
'instance': instance,
'cluster': cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
service=service,
namespace=instance,
cluster=cluster,
soa_dir=soa_dir,
status=status,
output=output)
def check_service_replication(client, service, instance, cluster, soa_dir, system_paasta_config):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param service: Service name, like "example_service"
:param instance: Instance name, like "main" or "canary"
:param cluster: name of the cluster
:param soa_dir: The SOA configuration directory to read from
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
"""
job_id = compose_job_id(service, instance)
try:
expected_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance, soa_dir=soa_dir)
except NoDeploymentsAvailable:
log.debug('deployments.json missing for %s. Skipping replication monitoring.' % job_id)
return
if expected_count is None:
return
log.info("Expecting %d total tasks for %s" % (expected_count, job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)
if proxy_port is not None:
check_smartstack_replication_for_instance(
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
expected_count=expected_count,
system_paasta_config=system_paasta_config,
)
else:
check_healthy_marathon_tasks_for_service_instance(
client=client,
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
expected_count=expected_count,
)
def load_smartstack_info_for_service(service, namespace, soa_dir, blacklist, system_paasta_config):
"""Retrives number of available backends for given services
:param service_instances: A list of tuples of (service, instance)
:param namespaces: list of Smartstack namespaces
:param blacklist: A list of blacklisted location tuples in the form (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form
::
{
'location_type': {
'unique_location_name': {
                    'service.instance': <# of available backends>
},
'other_unique_location_name': ...
}
}
"""
service_namespace_config = marathon_tools.load_service_namespace_config(service, namespace,
soa_dir=soa_dir)
discover_location_type = service_namespace_config.get_discover()
return get_smartstack_replication_for_attribute(
attribute=discover_location_type,
service=service,
namespace=namespace,
blacklist=blacklist,
system_paasta_config=system_paasta_config,
)
def get_smartstack_replication_for_attribute(attribute, service, namespace, blacklist, system_paasta_config):
"""Loads smartstack replication from a host with the specified attribute
:param attribute: a Mesos attribute
:param service: A service name, like 'example_service'
:param namespace: A particular smartstack namespace to inspect, like 'main'
:param blacklist: A list of blacklisted location tuples in the form of (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
              (the dictionary will contain keys for all unique attribute values)
"""
replication_info = {}
unique_values = mesos_tools.get_mesos_slaves_grouped_by_attribute(attribute=attribute, blacklist=blacklist)
full_name = compose_job_id(service, namespace)
for value, hosts in unique_values.iteritems():
# arbitrarily choose the first host with a given attribute to query for replication stats
synapse_host = hosts[0]
repl_info = replication_utils.get_replication_for_services(
synapse_host=synapse_host,
synapse_port=system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
services=[full_name],
)
replication_info[value] = repl_info
return replication_info
def main():
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
service_instances = get_services_for_cluster(
cluster=cluster, instance_type='marathon', soa_dir=args.soa_dir)
config = marathon_tools.load_marathon_config()
client = marathon_tools.get_marathon_client(config.get_url(), config.get_username(), config.get_password())
for service, instance in service_instances:
check_service_replication(
client=client,
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
system_paasta_config=system_paasta_config,
)
if __name__ == "__main__":
main()
| apache-2.0 | -9,141,361,211,709,925,000 | 41.23601 | 118 | 0.656029 | false |
kacozg/Alice | core.py | 1 | 1695 | """
How to run scrapers programmatically from a script
"""
import sys
#ubuntu sys.path.insert(0, '/home/kaco/Desktop/alice')
#windows sys.path.insert(0, 'C:/Users/Carlos/Google Drive/ZetUp/Buscato/alice')
sys.path.insert(0, 'C:/alice')
from spiders.abcdin_spider import AbcdinSpider
from spiders.casaximena_spider import CasaximenaSpider
from spiders.corona_spider import CoronaSpider
from spiders.dafiti_spider import DafitiSpider
from spiders.easy_spider import EasySpider
from spiders.falabella_spider import FalabellaSpider
from spiders.hites_spider import HitesSpider
from spiders.lapolar_spider import LapolarSpider
from spiders.linio_spider import LinioSpider
from spiders.paris_spider import ParisSpider
from spiders.pcfactory_spider import PcfactorySpider
from spiders.ripley_spider import RipleySpider
from spiders.sodimac_spider import SodimacSpider
from spiders.zmart_spider import ZmartSpider
import scrapy
from twisted.internet import reactor
from scrapy.crawler import CrawlerProcess
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
configure_logging()
runner = CrawlerProcess(get_project_settings())
runner.crawl(AbcdinSpider)
runner.crawl(CasaximenaSpider)
runner.crawl(CoronaSpider)
runner.crawl(DafitiSpider)
runner.crawl(EasySpider)
runner.crawl(FalabellaSpider)
runner.crawl(HitesSpider)
runner.crawl(LapolarSpider)
runner.crawl(LinioSpider)
runner.crawl(ParisSpider)
runner.crawl(PcfactorySpider)
runner.crawl(RipleySpider)
runner.crawl(SodimacSpider)
runner.crawl(ZmartSpider)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run() # the script will block here until all crawling jobs are finished
| apache-2.0 | 3,368,816,912,896,692,000 | 31.596154 | 80 | 0.830088 | false |